Example #1
0
/*
 * Write up to `count` bytes from `buffer` into the ring buffer of
 * channel `index`.  The write is clamped to the free space currently
 * available, so fewer bytes than requested may be written.
 *
 * `user` is forwarded to copy_from() — presumably it selects a
 * user-space vs. kernel-space source copy; confirm against copy_from.
 *
 * Returns the number of bytes written, or -ENOSYS if the rtlx
 * subsystem has not been initialised.
 */
ssize_t rtlx_write(int index, void *buffer, size_t count, int user)
{
	struct rtlx_channel *rt;
	size_t fl;

	/* subsystem not set up yet */
	if (rtlx == NULL)
		return(-ENOSYS);

	rt = &rtlx->channel[index];

	/* total number of bytes to copy */
	count = min(count,
		    (size_t)write_spacefree(rt->rt_read, rt->rt_write,
					    rt->buffer_size));

	/* first bit from write pointer to the end of the buffer, or count */
	fl = min(count, (size_t) rt->buffer_size - rt->rt_write);

	copy_from (&rt->rt_buffer[rt->rt_write], buffer, fl, user);

	/* if there's any left copy to the beginning of the buffer */
	if( count - fl )
		copy_from (rt->rt_buffer, buffer + fl, count - fl, user);

	/* advance the write index and wrap it at the buffer end */
	rt->rt_write += count;
	rt->rt_write %= rt->buffer_size;

	return(count);
}
Example #2
0
  /// Copy assignment: release current storage, then deep-copy rhs.
  JsonData& operator = ( JsonData const& rhs )
  {
    // Self-assignment guard: free() would otherwise destroy the very
    // data copy_from() is about to read.
    if ( this != &rhs )
    {
      free();
      copy_from( rhs );
    }

    return *this;
  }
Example #3
0
  // Copy assignment; a self-assignment is deliberately a no-op.
  constant& constant::operator=(const constant& rhs)
  {
    if (&rhs != this)
    {
      copy_from(rhs);
    }
    return *this;
  }
Example #4
0
 /// Construct a texture array with the given 2-D `sizes` and fill it
 /// from `data`.  `pitch` is the source row pitch passed through to
 /// copy_from; `kind` selects the cudaMemcpy direction (host->device
 /// by default).
 __host__
 inline TextureArray(const T *data, const Vector2i& sizes, size_t pitch,
                     cudaMemcpyKind kind = cudaMemcpyHostToDevice)
   : self_type{ sizes }
 {
   copy_from(data, sizes, pitch, kind);
 }
// Keyframe interpolation: start from the previous keyframe's settings,
// then linearly blend each EQ point's freq/value toward the next
// keyframe, weighted by where current_frame sits between the two.
void GraphicConfig::interpolate(GraphicConfig &prev, 
	GraphicConfig &next, 
	int64_t prev_frame, 
	int64_t next_frame, 
	int64_t current_frame)
{
// Guard: if both keyframes fall on the same frame the scale divisions
// below would divide by zero (NaN, then UB on the int cast).  Just
// adopt the previous keyframe.
	if(next_frame == prev_frame)
	{
		copy_from(prev);
		return;
	}

	double next_scale = (double)(current_frame - prev_frame) / (next_frame - prev_frame);
	double prev_scale = (double)(next_frame - current_frame) / (next_frame - prev_frame);

// Get current set of points from previous configuration
	copy_from(prev);

// Interpolate between current set of points and next set; only points
// present in both configurations are blended.
	for(int i = 0; i < MIN(next.points.size(), points.size()); i++)
	{
		points.get(i)->freq = (int)(prev.points.get(i)->freq *
			prev_scale +
			next.points.get(i)->freq *
			next_scale);
		points.get(i)->value = prev.points.get(i)->value *
			prev_scale +
			next.points.get(i)->value *
			next_scale;
	}
}
Example #6
0
// Reload this object's state from its backing file if the file has
// changed since the last load.  Currently always throws once the
// persistence check passes, because persistence is not implemented;
// the intended implementation is preserved below under #if 0.
// Returns false when persistence is disabled.
bool DistributedObject::update(const DistributedLock& dist_lock, UpgradableReadLock& lock)
{
	lock.check_read(get_local_mutex());

	if (!persistence_enabled_)
		return false;

	// Disabled feature: everything past this point is unreachable.
	throw std::runtime_error("Persistence currently not implemented");
#if 0
	if (&dist_lock.get_object() != this)
		throw std::runtime_error("Distributed lock has incorrect object");
	if (file_.string().empty())
		throw std::runtime_error("Distributed object has no associated file");

	try
	{
		{
			flush_cache();

			/* load from the file */
			std::ifstream file_stream(get_file().string().c_str());
			if (!file_stream)
				throw bfs::filesystem_error("open failed", get_file(), bs::error_code(errno, boost::system::posix_category));

			/* see if the file has changed, do nothing if it hasn't */
			time_t write_time = bfs::last_write_time(file_);
			if (write_time == timestamp_)
				return false;

			/* create a temporary instance to which the file will be loaded */
			DistributedObjectPtr temp_obj = boost::dynamic_pointer_cast<DistributedObject>(create_empty_instance());
			if (!temp_obj)
				throw std::runtime_error("Distributed object failed to create temporary instance");

			/* need an exclusive lock on the temporary object */
			BlockWriteLock temp_obj_lock(*temp_obj);

			InputArchive file_arch(file_stream);
			temp_obj->do_load(file_arch, temp_obj_lock);

			/* temporarily upgrade to a write lock for our local object and then apply the updates */
			BlockWriteLock write_lock(lock);
			copy_from(temp_obj, temp_obj_lock, write_lock);
		}

		/* update the timestamp */
		timestamp_ = bfs::last_write_time(file_);
	}
	catch (bfs::filesystem_error& e)
	{
		throw distributed_object_error("file error: " + std::string(e.what()));
	}
	catch (boost::archive::archive_exception& e)
	{
		throw distributed_object_error("deserialization error: " + std::string(e.what()));
	}
#endif
	return true;
}
Example #7
0
// Keyframe interpolation: this configuration has no smoothly blendable
// parameters, so it simply adopts the previous keyframe's settings.
// The frame arguments and `next` are unused.
void _1080to540Config::interpolate(_1080to540Config &prev, 
	_1080to540Config &next, 
	long prev_frame, 
	long next_frame, 
	long current_frame)
{
	copy_from(prev);
}
// Keyframe interpolation: no blendable parameters — adopt the previous
// keyframe's settings wholesale.  `next` and the frame numbers are unused.
void CompressorConfig::interpolate(CompressorConfig &prev, 
	CompressorConfig &next, 
	int64_t prev_frame, 
	int64_t next_frame, 
	int64_t current_frame)
{
	copy_from(prev);
}
Example #9
0
// Keyframe interpolation: no blendable parameters — adopt the previous
// keyframe's settings wholesale.  `next` and the frame numbers are unused.
void DeInterlaceConfig::interpolate(DeInterlaceConfig &prev, 
	DeInterlaceConfig &next, 
	int64_t prev_frame, 
	int64_t next_frame, 
	int64_t current_frame)
{
	copy_from(prev);
}
Example #10
0
// Keyframe interpolation: no blendable parameters — adopt a keyframe's
// settings wholesale.
// NOTE(review): this copies from `next`, while every sibling
// *Config::interpolate here copies from `prev` — confirm the asymmetry
// is intentional and not a typo.
void PhotoScaleConfig::interpolate(PhotoScaleConfig &prev, 
	PhotoScaleConfig &next, 
	int64_t prev_frame, 
	int64_t next_frame, 
	int64_t current_frame)
{
	copy_from(next);
}
// Keyframe interpolation: no blendable parameters — adopt the previous
// keyframe's settings wholesale.  `next` and the frame numbers are unused.
void FindObjectConfig::interpolate(FindObjectConfig &prev, 
	FindObjectConfig &next, 
	int64_t prev_frame, 
	int64_t next_frame, 
	int64_t current_frame)
{
	copy_from(prev);
}
Example #12
0
// Keyframe interpolation: no blendable parameters — adopt the previous
// keyframe's settings wholesale.  `next` and the frame numbers are unused.
void PluginAClientConfig::interpolate(PluginAClientConfig &prev, 
	PluginAClientConfig &next, 
	int64_t prev_frame, 
	int64_t next_frame, 
	int64_t current_frame)
{
	copy_from(prev);
}
Example #13
0
  // copy construct into state 1, always.
  // This is a choice, even if X is state 2 (a numpy).
  // We copy a numpy into a regular C++ array, which can then be used at max speed.
  mem_block (mem_block const & X): size_(X.size()), py_numpy(nullptr), py_guard(nullptr) {
  try { p = new ValueType[X.size()];}
   catch (std::bad_alloc& ba) { TRIQS_RUNTIME_ERROR<< "Memory allocation error in memblock copy construction. Size :"<<X.size() << "  bad_alloc error : "<< ba.what();}
   TRACE_MEM_DEBUG("Allocating from C++ a block of size "<< X.size() << " at address " <<p);
   TRIQS_MEMORY_USED_INC(X.size());
   // fresh block: one strong owner (us), no weak references yet
   ref_count=1;
   weak_ref_count =0;
   // now we copy the data
#ifndef TRIQS_WITH_PYTHON_SUPPORT
   copy_from(X);
#else
   // if X is in state 1 or 3
   if (X.py_numpy==nullptr) { copy_from(X); }
   else { // X was in state 2
    // else make a new copy of the numpy ...
    import_numpy_array();
    // the memcpy paths below only make sense for trivially copyable elements
    if (!is_scalar_or_pod<ValueType>::value) TRIQS_RUNTIME_ERROR << "Internal Error : memcpy on non-scalar";
#ifdef TRIQS_NUMPY_VERSION_LT_17
    PyObject * arr3 = X.py_numpy;
#else
    // STRANGE : uncommenting this leads to a segfault on mac ???
    // TO BE INVESTIGATED, IT IS NOT NORMAL
    //if (!PyArray_Check(X.py_numpy)) TRIQS_RUNTIME_ERROR<<"Internal error : is not an array";
    PyArrayObject * arr3 = (PyArrayObject *)(X.py_numpy);
#endif
    // if we can make a memcpy, do it.
    if ( ( PyArray_ISFORTRAN(arr3)) || (PyArray_ISCONTIGUOUS(arr3)))  {
     memcpy (p,PyArray_DATA(arr3),size_ * sizeof(ValueType));
    }
    else { // if the X.py_numpy is not contiguous, first let numpy copy it properly, then memcpy
     PyObject * na = PyObject_CallMethod(X.py_numpy,(char *)"copy",nullptr);
     assert(na);
#ifdef TRIQS_NUMPY_VERSION_LT_17
     PyObject * arr = na;
#else
     if (!PyArray_Check(na)) TRIQS_RUNTIME_ERROR<<"Internal error : is not an array";
     PyArrayObject * arr = (PyArrayObject *)(na);
#endif
     assert( ( PyArray_ISFORTRAN(arr)) || (PyArray_ISCONTIGUOUS(arr)));
     memcpy (p,PyArray_DATA(arr),size_ * sizeof(ValueType));
     // drop the temporary contiguous copy created by numpy's .copy()
     Py_DECREF(na);
    }
   }
#endif
  }
Example #14
0
		/// Assignment operator.
		/// Clears this vector, grows capacity to match `other`, then
		/// copies other's elements.  Safe against self-assignment.
		Vector<T>& operator=(const Vector<T>& other) {
			if (this != &other) {
				clear();
				m_size = other.m_size;
				ensure_capacity(other.m_capacity);
				// m_size is an element count, not a pointer:
				// compare against 0, not NULL.
				if (m_size != 0)
					copy_from(other.m_elements, m_size);
			}
			return *this;
		}
Example #15
0
// Copy constructor: replicate src's configuration flags in the
// initialiser list (with a fresh ref-count, depth and state), then
// deep-copy the remaining contents via copy_from().
goal::goal(goal const & src):
    m_manager(src.m()),
    m_ref_count(0),
    m_depth(0), 
    m_models_enabled(src.models_enabled()),
    m_proofs_enabled(src.proofs_enabled()), 
    m_core_enabled(src.unsat_core_enabled()), 
    m_inconsistent(false), 
    m_precision(PRECISE) {
    copy_from(src);
    }
Example #16
0
// Keyframe interpolation: start from the previous keyframe and blend
// wetness and base_freq linearly toward the next keyframe according to
// where current_frame sits between the two.
void PianoConfig::interpolate(PianoConfig &prev, 
	PianoConfig &next, 
	int64_t prev_frame, 
	int64_t next_frame, 
	int64_t current_frame)
{
// Guard: identical keyframe positions would divide by zero below
// (NaN, then UB on the int casts).  Just adopt the previous keyframe.
	if(next_frame == prev_frame)
	{
		copy_from(prev);
		return;
	}

	double next_scale = (double)(current_frame - prev_frame) / (next_frame - prev_frame);
	double prev_scale = (double)(next_frame - current_frame) / (next_frame - prev_frame);

	copy_from(prev);
	wetness = (int)(prev.wetness * prev_scale + next.wetness * next_scale);
	base_freq = (int)(prev.base_freq * prev_scale + next.base_freq * next_scale);
}
Example #17
0
    /// Copy constructor.  All owning pointer members are nulled first so
    /// that if copy_from() throws part-way through, destroy() can safely
    /// release only what was actually allocated before rethrowing.
    archive_options_create::archive_options_create(const archive_options_create & ref)
    {
	x_selection = x_subtree = x_ea_mask = x_compr_mask = x_backup_hook_file_mask = NULL;
	x_entrepot = NULL;
	try
	{
	    copy_from(ref);
	}
	catch(...)
	{
	    destroy();
	    throw;
	}
    }
Example #18
0
// Keyframe interpolation for the synthesizer configuration: start from
// the previous keyframe and blend wetness linearly toward the next one;
// momentary_notes is taken from the previous keyframe as-is.
void SynthConfig::interpolate(SynthConfig &prev, 
	SynthConfig &next, 
	int64_t prev_frame, 
	int64_t next_frame, 
	int64_t current_frame)
{
// Guard: identical keyframe positions would divide by zero below
// (NaN, then UB on the int cast).  Just adopt the previous keyframe.
	if(next_frame == prev_frame)
	{
		copy_from(prev);
		return;
	}

	double next_scale = (double)(current_frame - prev_frame) / (next_frame - prev_frame);
	double prev_scale = (double)(next_frame - current_frame) / (next_frame - prev_frame);

	copy_from(prev);
	wetness = (int)(prev.wetness * prev_scale + next.wetness * next_scale);
//	base_freq = (int)(prev.base_freq * prev_scale + next.base_freq * next_scale);

	momentary_notes = prev.momentary_notes;
}
Example #19
0
    /// Copy assignment: destroys the current contents, copies the base
    /// catalogue part via the base-class assignment, then the
    /// escape_catalogue-specific part via copy_from().
    const escape_catalogue & escape_catalogue::operator = (const escape_catalogue &ref)
    {
	// Self-assignment guard: destroy() releases our own state, so
	// assigning from ourselves would then copy from destroyed data.
	if(this != &ref)
	{
	    catalogue *me = this;
	    const catalogue *you = &ref;

	    destroy();

		// copying the catalogue part
	    *me = *you;

		// copying the escape_catalogue specific part
	    copy_from(ref);
	}

	return *this;
    }
// Copy assignment.  Tears down the current graph by visiting every node
// with a destructor functor, clears the edge lists of the b/e boundary
// nodes (presumably begin/end — confirm), copies the counters, then
// deep-copies the node structure via copy_from().  Guarded against
// self-assignment, which would otherwise destroy the source graph.
void flowgraph::operator = ( const flowgraph& f )
{
   if( this != &f )
   {
      // destroy all nodes reachable between b and e
      destructor d( &b );
      visitnodes( d, &b, &e );

      b. pred. clear( ); b. succ. clear( ); 
      e. pred. clear( ); e. succ. clear( ); 

      n = f. n;
      c = f. c;

      copy_from(f);
   }
}
// Non-blocking produce: append up to `size` bytes from `src` into the
// circular buffer, clamped to the free space available.  Wakes a
// consumer blocked waiting for enough data.  Returns the number of
// bytes actually copied (0 if the buffer was full).
uint ConcurrentCircularBuffer::nb_produce(uint  size, const char* src) 
{
  // assert(size>0);
  uint  capacity = get_capacity();
  m_rd_wr_lock.lock();
  // Read the write pointer only after taking the lock: the original
  // read it beforehand, creating a race window against any other
  // thread that updates m_wr_ptr.
  uint   wr      = m_wr_ptr; 
  if ((m_size+size) > capacity) {
    size = capacity - m_size; //reduce size to what is available
    if (size == 0) { // buffer is full
      m_rd_wr_lock.unlock();
      return size;
    }
  }
  copy_from(src, wr, size, capacity);
  m_size += size;
  // Resume a consumer that was waiting for at least m_waited_bytes.
  if (m_blocked_consumer && m_waited_bytes <= m_size) {
    CommandInitiator* thread = (CommandInitiator*) m_blocked_consumer; // cast away volatile...
    m_blocked_consumer = 0;
    thread->resume(true);
  }
  m_wr_ptr = (wr+size) & m_capacity_mask; 
  m_rd_wr_lock.unlock();
  return size; 
}
Example #22
0
 /// Copy assignment: release current elements, then deep-copy from v.
 vector &operator=(const vector &v) {
     // Self-assignment guard: clear() would otherwise discard the
     // elements copy_from() is about to read.
     if (this != &v) {
         clear();
         copy_from(v);
     }
     return *this;
 }
Example #23
0
 /// Construct by copying the elements of an arbitrary container.
 /// NOTE(review): takes a non-const reference — presumably copy_from
 /// only reads `container`; confirm, and consider const& if so.
 vector(Container &container) {
     copy_from(container);
 }
Example #24
0
 /// Construct from a braced initializer list, copying its elements.
 vector(const std::initializer_list<Type> &list) {
     copy_from(list);
 }
Example #25
0
 /// Add the elements of `list` to the vector; returns *this for chaining.
 /// NOTE(review): whether this appends or replaces depends on copy_from's
 /// semantics, which are not visible here — confirm before relying on it.
 vector &push_back(const std::initializer_list<Type> &list) {
     copy_from(list);
     return *this;
 }
Example #26
0
//------------------------------------------------------------------------
// Copy assignment: delegates the member-wise copy to copy_from().
const m_texture & m_texture::operator= (const m_texture & tex)
{
	// Self-assignment guard: copy_from may release the destination's
	// resources before reading the source, so skip the no-op copy.
	if (this != &tex)
		copy_from(tex);
	return *this;
}
Example #27
0
//------------------------------------------------------------------------
// Copy constructor: member-wise copy delegated to copy_from().
m_texture::m_texture(const m_texture & tex)
{
    copy_from(tex);
}
// Copy constructor: copies the counters n and c in the initialiser
// list, then deep-copies the node structure via copy_from().
flowgraph::flowgraph( const flowgraph& f )
   : n( f. n ),
     c( f. c )
{ 
   copy_from(f); 
}
Example #29
0
 /// Copy-construct from a pointer to another Transformations.
 /// NOTE(review): `t` is forwarded to copy_from with no null check here —
 /// presumably callers never pass nullptr; confirm copy_from tolerates it.
 NeighborSearch<Scalar>::Transformations::Transformations(const Transformations* t) 
 {
   copy_from(t); 
 }
Example #30
0
 /// Construct the transformation set from a Hermes vector of indices,
 /// delegating the copy to copy_from().
 NeighborSearch<Scalar>::Transformations::Transformations(const Hermes::vector<unsigned int>& t) 
 {
   copy_from(t); 
 }