Example #1
int queue_get(void *que,uint32_t *id,uint32_t *op,uint8_t **data,uint32_t *leng) {
	queue *q = (queue*)que;
	qentry *qe;
	eassert(pthread_mutex_lock(&(q->lock))==0);
	while (q->elements==0) {
		q->freewaiting++;
		eassert(pthread_cond_wait(&(q->waitfree),&(q->lock))==0);
	}
	qe = q->head;
	q->head = qe->next;
	if (q->head==NULL) {
		q->tail = &(q->head);
	}
	q->elements--;
	q->size -= qe->leng;
	if (q->fullwaiting>0) {
		eassert(pthread_cond_signal(&(q->waitfull))==0);
		q->fullwaiting--;
	}
	eassert(pthread_mutex_unlock(&(q->lock))==0);
	if (id) {
		*id = qe->id;
	}
	if (op) {
		*op = qe->op;
	}
	if (data) {
		*data = qe->data;
	}
	if (leng) {
		*leng = qe->leng;
	}
	free(qe);
	return 0;
}
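A minimal consumer sketch, not part of the original module, showing how queue_get is typically driven from a worker thread; consume_forever is an illustrative name.

#include <stdint.h>
#include <stdlib.h>

/* Prototype as shown in this listing. */
int queue_get(void *que,uint32_t *id,uint32_t *op,uint8_t **data,uint32_t *leng);

/* Hypothetical consumer loop: block until an entry arrives, process it,
   then free the payload.  queue_get frees the queue entry itself, but
   ownership of the data buffer passes to the caller. */
static void consume_forever(void *q) {
	uint32_t id,op,leng;
	uint8_t *data;
	for (;;) {
		queue_get(q,&id,&op,&data,&leng);
		/* ... application-specific handling of (id,op,data,leng) ... */
		free(data);
	}
}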
Example #2
static void evict_lower_half (log_t *log)
{
  ptrdiff_t size = ASIZE (log->key_and_value) / 2;
  EMACS_INT median = approximate_median (log, 0, size);
  ptrdiff_t i;

  for (i = 0; i < size; i++)
    /* Evict not only values smaller but also values equal to the median,
       so as to make sure we evict something no matter what.  */
    if (XINT (HASH_VALUE (log, i)) <= median)
      {
	Lisp_Object key = HASH_KEY (log, i);
	{ /* FIXME: we could make this more efficient.  */
	  Lisp_Object tmp;
	  XSET_HASH_TABLE (tmp, log); /* FIXME: Use make_lisp_ptr.  */
	  Fremhash (key, tmp);
	}
	eassert (EQ (log->next_free, make_number (i)));
	{
	  int j;
	  eassert (VECTORP (key));
	  for (j = 0; j < ASIZE (key); j++)
	    ASET (key, j, Qnil);
	}
	set_hash_key_slot (log, i, key);
      }
}
Example #3
static bool
module_copy_string_contents (emacs_env *env, emacs_value value, char *buffer,
			     ptrdiff_t *length)
{
  MODULE_FUNCTION_BEGIN (false);
  Lisp_Object lisp_str = value_to_lisp (value);
  CHECK_STRING (lisp_str);

  Lisp_Object lisp_str_utf8 = ENCODE_UTF_8 (lisp_str);
  ptrdiff_t raw_size = SBYTES (lisp_str_utf8);
  ptrdiff_t required_buf_size = raw_size + 1;

  eassert (length != NULL);

  if (buffer == NULL)
    {
      *length = required_buf_size;
      return true;
    }

  eassert (*length >= 0);

  if (*length < required_buf_size)
    {
      *length = required_buf_size;
      xsignal0 (Qargs_out_of_range);
    }

  *length = required_buf_size;
  memcpy (buffer, SDATA (lisp_str_utf8), raw_size + 1);

  return true;
}
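A hypothetical caller sketch, not from the Emacs sources, of the usual two-call protocol implemented above: first pass a NULL buffer to learn the required size, then copy into a freshly allocated buffer of that size.

#include <stddef.h>
#include <stdlib.h>
#include <emacs-module.h>

/* Hypothetical helper: returns a malloc'ed, NUL-terminated UTF-8 copy of
   VALUE, or NULL if allocation fails or a non-local exit is pending.
   The caller must free the result. */
static char *
copy_string_sketch (emacs_env *env, emacs_value value)
{
  ptrdiff_t size = 0;
  /* First call: a NULL buffer just reports the required size. */
  if (!env->copy_string_contents (env, value, NULL, &size))
    return NULL;
  char *buf = malloc (size);
  if (buf == NULL)
    return NULL;
  /* Second call: copy the contents, including the terminating NUL. */
  if (!env->copy_string_contents (env, value, buf, &size))
    {
      free (buf);
      return NULL;
    }
  return buf;
}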
Example #4
static void
handle_profiler_signal (int signal)
{
  if (EQ (backtrace_top_function (), Qautomatic_gc))
    /* Special case the time-count inside GC because the hash-table
       code is not prepared to be used while the GC is running.
       More specifically it uses ASIZE at many places where it does
       not expect the ARRAY_MARK_FLAG to be set.  We could try and
       harden the hash-table code, but it doesn't seem worth the
       effort.  */
    cpu_gc_count = saturated_add (cpu_gc_count, 1);
  else
    {
      EMACS_INT count = 1;
#ifdef HAVE_ITIMERSPEC
      if (profiler_timer_ok)
	{
	  int overruns = timer_getoverrun (profiler_timer);
	  eassert (overruns >= 0);
	  count += overruns;
	}
#endif
      eassert (HASH_TABLE_P (cpu_log));
      record_backtrace (XHASH_TABLE (cpu_log), count);
    }
}
Example #5
int queue_tryput(void *que,uint32_t id,uint32_t op,uint8_t *data,uint32_t leng) {
	queue *q = (queue*)que;
	qentry *qe;
	eassert(pthread_mutex_lock(&(q->lock))==0);
	if (q->maxsize) {
		if (leng>q->maxsize) {
			eassert(pthread_mutex_unlock(&(q->lock))==0);
			errno = EDEADLK;
			return -1;
		}
		if (q->size+leng>q->maxsize) {
			eassert(pthread_mutex_unlock(&(q->lock))==0);
			errno = EBUSY;
			return -1;
		}
	}
	qe = malloc(sizeof(qentry));
	passert(qe);
	qe->id = id;
	qe->op = op;
	qe->data = data;
	qe->leng = leng;
	qe->next = NULL;
	q->elements++;
	q->size += leng;
	*(q->tail) = qe;
	q->tail = &(qe->next);
	if (q->freewaiting>0) {
		eassert(pthread_cond_signal(&(q->waitfree))==0);
		q->freewaiting--;
	}
	eassert(pthread_mutex_unlock(&(q->lock))==0);
	return 0;
}
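A hypothetical producer sketch, not from the original module: retry while queue_tryput reports EBUSY (queue temporarily full) and give up on EDEADLK (an entry that can never fit); tryput_with_retry is an illustrative name.

#include <stdint.h>
#include <errno.h>
#include <unistd.h>

/* Prototype as shown in this listing. */
int queue_tryput(void *que,uint32_t id,uint32_t op,uint8_t *data,uint32_t leng);

/* Hypothetical non-blocking producer with a simple retry loop. */
static int tryput_with_retry(void *q,uint32_t id,uint32_t op,uint8_t *data,uint32_t leng) {
	while (queue_tryput(q,id,op,data,leng)<0) {
		if (errno==EDEADLK) {	// entry larger than the whole queue - can never fit
			return -1;
		}
		usleep(1000);	// EBUSY: queue currently full, wait a bit and retry
	}
	return 0;
}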
Example #6
uint32_t queue_elements(void *que) {
	queue *q = (queue*)que;
	uint32_t r;
	eassert(pthread_mutex_lock(&(q->lock))==0);
	r=q->elements;
	eassert(pthread_mutex_unlock(&(q->lock))==0);
	return r;
}
Example #7
int queue_isfull(void *que) {
	queue *q = (queue*)que;
	int r;
	eassert(pthread_mutex_lock(&(q->lock))==0);
	r = (q->maxsize>0 && q->maxsize<=q->size)?1:0;
	eassert(pthread_mutex_unlock(&(q->lock))==0);
	return r;
}
Example #8
int queue_isempty(void *que) {
	queue *q = (queue*)que;
	int r;
	eassert(pthread_mutex_lock(&(q->lock))==0);
	r=(q->elements==0)?1:0;
	eassert(pthread_mutex_unlock(&(q->lock))==0);
	return r;
}
Example #9
static void
record_backtrace (log_t *log, EMACS_INT count)
{
  Lisp_Object backtrace;
  ptrdiff_t index;

  if (!INTEGERP (log->next_free))
    /* FIXME: transfer the evicted counts to a special entry rather
       than dropping them on the floor.  */
    evict_lower_half (log);
  index = XINT (log->next_free);

  /* Get a "working memory" vector.  */
  backtrace = HASH_KEY (log, index);
  get_backtrace (backtrace);

  { /* We basically do a `gethash+puthash' here, except that we have to be
       careful to avoid memory allocation since we're in a signal
       handler, and we optimize the code to try and avoid computing the
       hash+lookup twice.  See fns.c:Fputhash for reference.  */
    EMACS_UINT hash;
    ptrdiff_t j = hash_lookup (log, backtrace, &hash);
    if (j >= 0)
      {
	EMACS_INT old_val = XINT (HASH_VALUE (log, j));
	EMACS_INT new_val = saturated_add (old_val, count);
	set_hash_value_slot (log, j, make_number (new_val));
      }
    else
      { /* BEWARE!  hash_put in general can allocate memory.
	   But currently it only does that if log->next_free is nil.  */
	ptrdiff_t j;
	eassert (!NILP (log->next_free));
	j = hash_put (log, backtrace, make_number (count), hash);
	/* Let's make sure we've put `backtrace' right where it
	   already was to start with.  */
	eassert (index == j);

	/* FIXME: If the hash-table is almost full, we should set
	   some global flag so that some Elisp code can offload its
	   data elsewhere, so as to avoid the eviction code.
	   There are 2 ways to do that, AFAICT:
	   - Set a flag checked in QUIT, such that QUIT can then call
	     Fprofiler_cpu_log and stash the full log for later use.
	   - Set a flag checked in post-gc-hook, so that Elisp code can call
	     profiler-cpu-log.  That gives us more flexibility since that
	     Elisp code can then do all kinds of fun stuff like write
	     the log to disk.  Or turn it right away into a call tree.
	   Of course, using Elisp is generally preferable, but it may
	   take longer until we get a chance to run the Elisp code, so
	   there's more risk that the table will get full before we
	   get there.  */
      }
  }
}
Example #10
ImageMask2d::ImageMask2d(Image<float> *image, Mask2d *mask2d, ExamenParams *params) :
 _params(params)
{
    eassert(image != NULL);
    eassert(mask2d != NULL);
    eassert(_params != NULL);
    eassert(image->getWidth() == mask2d->getWidth());
    eassert(image->getHeight() == mask2d->getHeight());
    _img = image;
    _mask = mask2d;
}
Example #11
uint32_t queue_sizeleft(void *que) {
	queue *q = (queue*)que;
	uint32_t r;
	eassert(pthread_mutex_lock(&(q->lock))==0);
	if (q->maxsize>0) {
		r = q->maxsize-q->size;
	} else {
		r = 0xFFFFFFFF;
	}
	eassert(pthread_mutex_unlock(&(q->lock))==0);
	return r;
}
Example #12
static EMACS_INT approximate_median (log_t *log,
				     ptrdiff_t start, ptrdiff_t size)
{
  eassert (size > 0);
  if (size < 2)
    return XINT (HASH_VALUE (log, start));
  if (size < 3)
    /* Not an actual median, but better for our application than
       choosing either of the two numbers.  */
    return ((XINT (HASH_VALUE (log, start))
	     + XINT (HASH_VALUE (log, start + 1)))
	    / 2);
  else
    {
      ptrdiff_t newsize = size / 3;
      ptrdiff_t start2 = start + newsize;
      EMACS_INT i1 = approximate_median (log, start, newsize);
      EMACS_INT i2 = approximate_median (log, start2, newsize);
      EMACS_INT i3 = approximate_median (log, start2 + newsize,
					 size - 2 * newsize);
      return (i1 < i2
	      ? (i2 < i3 ? i2 : (i1 < i3 ? i3 : i1))
	      : (i1 < i3 ? i1 : (i2 < i3 ? i3 : i2)));
    }
}
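For reference, here is the same three-thirds pseudo-median recursion written against a plain int array; this is an illustrative standalone sketch, not Emacs code.

#include <stddef.h>

/* Illustrative reimplementation of the recursion above on an int array.
   On {1, 9, 2, 8, 3, 7, 4, 6, 5} it returns 5, the exact median. */
static int
approx_median_sketch (const int *v, ptrdiff_t start, ptrdiff_t size)
{
  if (size < 2)
    return v[start];
  if (size < 3)
    /* Average of the two values, as in the original. */
    return (v[start] + v[start + 1]) / 2;
  ptrdiff_t third = size / 3;
  int a = approx_median_sketch (v, start, third);
  int b = approx_median_sketch (v, start + third, third);
  int c = approx_median_sketch (v, start + 2 * third, size - 2 * third);
  /* Median of the three sub-medians. */
  return a < b ? (b < c ? b : (a < c ? c : a))
               : (a < c ? a : (b < c ? c : b));
}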
Example #13
std::shared_ptr< ExhaustiveLPOModelTrainer > makeExhaustiveTrainer( const ExhaustiveLPOModel & that, const std::vector< std::shared_ptr< ImageOverSegmentation > >& ios, const std::vector< T >& gt ) {
    std::string name = typeStr(that);

    std::vector< VectorXf > params = that.all_params_;
    const int N = ios.size(), M = params.size();
    std::vector<int> oid( gt.size()+1 );
    std::transform( gt.begin(), gt.end(), oid.data()+1, static_cast<int(*)(const T&)>(&no) );
    std::partial_sum( oid.begin(), oid.end(), oid.begin() );

    std::vector< float > avg_prop( params.size(), 0. );
    std::vector< std::vector< float > > iou( params.size(), std::vector< float >(oid.back(),0.f) );
    #pragma omp parallel for
    for( int i=0; i<N; i++ ) {
        std::vector<Proposals> s = that.generateProposals( *ios[i], params );
        for( int j=0; j<M; j++ ) {
            Proposals p = s[j];

            SegmentationOverlap o(p.s, gt[i]);
            const int no = o.nObjects();
            eassert( oid[i]+no == oid[i+1] );

            auto best_iou = VectorXf::Map(iou[j].data()+oid[i],no);
            int n = p.p.rows();
            for( int k=0; k<n; k++ )
                best_iou = best_iou.array().max( o.iou( p.p.row(k) ).array() );

            #pragma omp atomic
            avg_prop[j] += 1.0 * n / N;
        }
    }
    return std::make_shared<ExhaustiveLPOModelTrainerImplementation>( name, params, iou, avg_prop );
}
Example #14
RowVectorXf operator*(const RowVectorXf& o, const SeedFeature& f) {
	eassert( o.size() == f.static_f_.rows() );
	RowVectorXf r( f.static_f_.cols() + f.dynamic_f_.cols() );
	r.head(f.static_f_.cols())  = o * f.static_f_;
	r.tail(f.dynamic_f_.cols()) = o * f.dynamic_f_;
	return r;
}
Example #15
void
free_before_dump (void *ptr)
{
  if (!ptr)
    return;

  /* Before dumping.  */
  if (dumped_data < (unsigned char *)ptr
      && (unsigned char *)ptr < bc_limit)
    {
      /* Free the block if it is allocated in the private heap.  */
      HeapFree (heap, 0, ptr);
    }
  else
    {
      /* Look for the big chunk.  */
      int i;

      for (i = 0; i < blocks_number; i++)
	{
	  if (blocks[i].address == ptr)
	    {
	      /* Reset block occupation if found.  */
	      blocks[i].occupied = 0;
	      break;
	    }
	}
      /* Trigger an error if the block was not found, since freeing an
	 unknown pointer here is a bug.  */
      eassert (i < blocks_number);
    }
}
Example #16
/* Lock MUTEX for thread LOCKER, setting its lock count to NEW_COUNT,
   if non-zero, or to 1 otherwise.

   If MUTEX is locked by LOCKER, NEW_COUNT must be zero, and the
   MUTEX's lock count will be incremented.

   If MUTEX is locked by another thread, this function will release
   the global lock, giving other threads a chance to run, and will
   wait for the MUTEX to become unlocked, at which point it will
   re-acquire the global lock.

   Return value is 1 if the function waited for the MUTEX to become
   unlocked (meaning other threads could have run during the wait),
   zero otherwise.  */
static int
lisp_mutex_lock_for_thread (lisp_mutex_t *mutex, struct thread_state *locker,
			    int new_count)
{
  struct thread_state *self;

  if (mutex->owner == NULL)
    {
      mutex->owner = locker;
      mutex->count = new_count == 0 ? 1 : new_count;
      return 0;
    }
  if (mutex->owner == locker)
    {
      eassert (new_count == 0);
      ++mutex->count;
      return 0;
    }

  self = locker;
  self->wait_condvar = &mutex->condition;
  while (mutex->owner != NULL && (new_count != 0
				  || NILP (self->error_symbol)))
    sys_cond_wait (&mutex->condition, &global_lock);
  self->wait_condvar = NULL;

  if (new_count == 0 && !NILP (self->error_symbol))
    return 1;

  mutex->owner = self;
  mutex->count = new_count == 0 ? 1 : new_count;

  return 1;
}
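The comment above describes an owner/count protocol layered on top of a single global lock. Below is a simplified standalone sketch of the same idea using a plain pthread mutex in place of the Emacs global lock; the struct and names are illustrative, not Emacs internals.

#include <pthread.h>

/* Illustrative recursive-lock sketch: OWNER and COUNT play the same roles
   as mutex->owner and mutex->count above.  Initialize global/condition with
   PTHREAD_MUTEX_INITIALIZER/PTHREAD_COND_INITIALIZER and owned = 0. */
struct sketch_mutex {
  pthread_mutex_t global;	/* stands in for the Emacs global lock */
  pthread_cond_t condition;
  pthread_t owner;
  int owned;
  unsigned int count;
};

/* Return 1 if we had to wait (other threads may have run), 0 otherwise. */
static int
sketch_lock (struct sketch_mutex *m)
{
  int waited = 0;
  pthread_mutex_lock (&m->global);
  if (m->owned && pthread_equal (m->owner, pthread_self ()))
    ++m->count;			/* re-entrant lock by the current owner */
  else
    {
      while (m->owned)
	{
	  waited = 1;
	  /* Releases the "global" lock while waiting, like the original. */
	  pthread_cond_wait (&m->condition, &m->global);
	}
      m->owned = 1;
      m->owner = pthread_self ();
      m->count = 1;
    }
  pthread_mutex_unlock (&m->global);
  return waited;
}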
Example #17
std::vector< Proposals > LPO::propose(const ImageOverSegmentation& ios, float max_iou, int model_id, bool box_nms) const {
	std::vector< Proposals > all_prop;
	// Generate all proposals
	for( int i=0; i<models_.size(); i++ )
		if( i==model_id || model_id==-1 ){
			const std::vector<Proposals> & props = models_[i]->propose( ios );
			for( const Proposals & p: props ) {
				// Can we merge some proposal maps?
				bool merge = false;
				for( Proposals & pp: all_prop ) {
					if( pp.s == p.s ) {
						eassert( pp.p.cols() == p.p.cols() );

						// Merge the proposal maps
						merge = true;
						RMatrixXb new_p( pp.p.rows()+p.p.rows(), p.p.cols() );
						new_p.topRows  ( pp.p.rows() ) = pp.p;
						new_p.bottomRows( p.p.rows() ) = p.p;
						pp.p = new_p;
						break;
					}
				}
				if( !merge )
					all_prop.push_back( p );
			}
		}
	if( box_nms )
		return boxNms( all_prop, max_iou );
	// Remove empty proposals and (near) duplicates
	// Only doing it on CRF proposals is much faster and doesn't generate
	// too many more proposals (~100 more)
	all_prop[0] = nms( all_prop[0], max_iou );
	return all_prop;
// 	return nms( all_prop, max_iou );
}
Example #18
std::vector<TrainingParameters> filterParameters( const std::vector<TrainingParameters> & params, float f0 ) {
	eassert( params.size()>0 );
	// Compute the transportation cost
	RMatrixXf score = 1.f-scoreMatrix( params ).array();

	// Compute the facility cost
	VectorXf f = VectorXf::Constant(params.size(),f0);
	for( int i=0; i<params.size(); i++ )
		f[i] = params[i].nProposals()*f0;

	VectorXb r;
	{
		VectorXb x[1];
		float s[10] = {0.f};
		int N = sizeof(x) / sizeof(x[0]);
		x[0] = Floc::greedy( f, score );
		if( N > 1 )
			x[1] = Floc::jms( f, score );
		if( N > 2 )
			x[2] = Floc::myz( f, score );

		for( int i=0; i<N; i++ )
			s[i] = Floc::energy( f, score, x[i] );

// 		printf("Filter greedy = %f   jms = %f   myz = %f\n", s[0], s[1], s[2] );
		r = x[0];
		float rs = s[0];
		for( int i=1; i<N; i++ )
			if( s[i] < rs ) {
				rs = s[i];
				r = x[i];
			}
	}
	return filter( params, r );
}
Example #19
RMatrixXf scoreMatrix( const std::vector<TrainingParameters> & params ) {
	eassert( params.size()>0 );
	int D = params.front().accuracy.size();
	RMatrixXf score( params.size(), D );
	for( int i=0; i<params.size(); i++ )
		score.row( i ) = params[i].accuracy.transpose();
	return score;
}
Example #20
void* queue_new(uint32_t size) {
	queue *q;
	q = (queue*)malloc(sizeof(queue));
	passert(q);
	q->head = NULL;
	q->tail = &(q->head);
	q->elements = 0;
	q->size = 0;
	q->maxsize = size;
	q->freewaiting = 0;
	q->fullwaiting = 0;
	if (size) {
		eassert(pthread_cond_init(&(q->waitfull),NULL)==0);
	}
	eassert(pthread_cond_init(&(q->waitfree),NULL)==0);
	eassert(pthread_mutex_init(&(q->lock),NULL)==0);
	return q;
}
Example #21
static VectorXs map_id( const RMatrixXi & ms, const RMatrixXs & s ) {
	eassert( ms.rows() == s.rows() && ms.cols() == s.cols() );
	int Nms = ms.maxCoeff()+1;
	VectorXs r = VectorXs::Zero(Nms);
	for( int j=0; j<s.rows(); j++ )
		for( int i=0; i<s.cols(); i++ )
			r[ ms(j,i) ] = s(j,i);
	return r;
}
Example #22
static inline void job_send_status(jobpool *jp,uint32_t jobid,uint8_t status) {
	zassert(pthread_mutex_lock(&(jp->pipelock)));
	if (queue_isempty(jp->statusqueue)) {	// first status
		eassert(write(jp->wpipe,&status,1)==1);	// write anything to wake up select
	}
	queue_put(jp->statusqueue,jobid,status,NULL,1);
	zassert(pthread_mutex_unlock(&(jp->pipelock)));
	return;
}
Example #23
static bool
module_copy_string_contents (emacs_env *env, emacs_value value, char *buffer,
			     ptrdiff_t *length)
{
  MODULE_FUNCTION_BEGIN (false);
  Lisp_Object lisp_str = value_to_lisp (value);
  if (! STRINGP (lisp_str))
    {
      module_wrong_type (env, Qstringp, lisp_str);
      return false;
    }

  Lisp_Object lisp_str_utf8 = ENCODE_UTF_8 (lisp_str);
  ptrdiff_t raw_size = SBYTES (lisp_str_utf8);
  if (raw_size == PTRDIFF_MAX)
    {
      module_non_local_exit_signal_1 (env, Qoverflow_error, Qnil);
      return false;
    }
  ptrdiff_t required_buf_size = raw_size + 1;

  eassert (length != NULL);

  if (buffer == NULL)
    {
      *length = required_buf_size;
      return true;
    }

  eassert (*length >= 0);

  if (*length < required_buf_size)
    {
      *length = required_buf_size;
      module_non_local_exit_signal_1 (env, Qargs_out_of_range, Qnil);
      return false;
    }

  *length = required_buf_size;
  memcpy (buffer, SDATA (lisp_str_utf8), raw_size + 1);

  return true;
}
Example #24
void LPO::load( std::istream & is ) {
	int n = 0;
	is.read( (char*)&n, sizeof(n) );
	eassert( n == PROP_MAGIC );
	n=0;
	is.read( (char*)&n, sizeof(n) );
	models_.clear();
	for( int i=0; i<n; i++ )
		models_.push_back( loadLPOModel(is) );
}
Example #25
void *
sbrk (ptrdiff_t increment)
{
  /* data_region_end is the address beyond the last allocated byte.
     The sbrk() function is not emulated at all, except for a 0 value
     of its parameter.  This is needed by the Emacs Lisp function
     `memory-limit'.  */
  eassert (increment == 0);
  return data_region_end;
}
Example #26
void queue_delete(void *que) {
	queue *q = (queue*)que;
	qentry *qe,*qen;
	eassert(pthread_mutex_lock(&(q->lock))==0);
	sassert(q->freewaiting==0);
	sassert(q->fullwaiting==0);
	for (qe = q->head ; qe ; qe = qen) {
		qen = qe->next;
		free(qe->data);
		free(qe);
	}
	eassert(pthread_mutex_unlock(&(q->lock))==0);
	eassert(pthread_mutex_destroy(&(q->lock))==0);
	eassert(pthread_cond_destroy(&(q->waitfree))==0);
	if (q->maxsize) {
		eassert(pthread_cond_destroy(&(q->waitfull))==0);
	}
	free(q);
}
Example #27
static void
vox_configure (struct sound_device *sd)
{
  int val;
#ifdef USABLE_SIGIO
  sigset_t oldset, blocked;
#endif

  eassert (sd->fd >= 0);

  /* On GNU/Linux, it seems that the device driver doesn't like to be
     interrupted by a signal.  Block the ones we know to cause
     troubles.  */
  turn_on_atimers (0);
#ifdef USABLE_SIGIO
  sigemptyset (&blocked);
  sigaddset (&blocked, SIGIO);
  pthread_sigmask (SIG_BLOCK, &blocked, &oldset);
#endif

  val = sd->format;
  if (ioctl (sd->fd, SNDCTL_DSP_SETFMT, &sd->format) < 0
      || val != sd->format)
    sound_perror ("Could not set sound format");

  val = sd->channels != 1;
  if (ioctl (sd->fd, SNDCTL_DSP_STEREO, &val) < 0
      || val != (sd->channels != 1))
    sound_perror ("Could not set stereo/mono");

  /* I think bps and sampling_rate are the same, but who knows.
     Check this, and use SND_DSP_SPEED for both.  */
  if (sd->sample_rate > 0)
    {
      val = sd->sample_rate;
      if (ioctl (sd->fd, SNDCTL_DSP_SPEED, &sd->sample_rate) < 0)
	sound_perror ("Could not set sound speed");
      else if (val != sd->sample_rate)
	sound_warning ("Could not set sample rate");
    }

  if (sd->volume > 0)
    {
      int volume = sd->volume & 0xff;
      volume |= volume << 8;
      /* This may fail if there is no mixer.  Ignore the failure.  */
      ioctl (sd->fd, SOUND_MIXER_WRITE_PCM, &volume);
    }

  turn_on_atimers (1);
#ifdef USABLE_SIGIO
  pthread_sigmask (SIG_SETMASK, &oldset, 0);
#endif
}
Example #28
void
discard_menu_items (void)
{
  /* Free the structure if it is especially large.
     Otherwise, hold on to it, to save time.  */
  if (menu_items_allocated > 200)
    {
      menu_items = Qnil;
      menu_items_allocated = 0;
    }
  eassert (NILP (menu_items_inuse));
}
Example #29
static inline int job_receive_status(jobpool *jp,uint32_t *jobid,uint8_t *status) {
	uint32_t qstatus;
	zassert(pthread_mutex_lock(&(jp->pipelock)));
	queue_get(jp->statusqueue,jobid,&qstatus,NULL,NULL);
	*status = qstatus;
	if (queue_isempty(jp->statusqueue)) {
		eassert(read(jp->rpipe,&qstatus,1)==1);	// make pipe empty
		zassert(pthread_mutex_unlock(&(jp->pipelock)));
		return 0;	// last element
	}
	zassert(pthread_mutex_unlock(&(jp->pipelock)));
	return 1;	// not last
}
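A hypothetical receiving-side fragment, not from the original sources, showing how the byte written by job_send_status wakes a select() loop, after which statuses are drained until job_receive_status returns 0 (which also empties the pipe). It assumes the jobpool definition from the surrounding module and an application-specific handle_status() callback.

#include <stdint.h>
#include <sys/select.h>

/* Hypothetical poll step for the main loop. */
static void poll_statuses_once(jobpool *jp) {
	fd_set rfds;
	uint32_t jobid;
	uint8_t status;
	int more;
	FD_ZERO(&rfds);
	FD_SET(jp->rpipe,&rfds);
	if (select(jp->rpipe+1,&rfds,NULL,NULL,NULL)<=0) {
		return;
	}
	do {	// at least one status is queued whenever the pipe is readable
		more = job_receive_status(jp,&jobid,&status);
		// handle_status(jobid,status);	// application-specific
	} while (more);
}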
Example #30
/* Like lisp_mutex_unlock, but sets MUTEX's lock count to zero
   regardless of its value.  Return the previous lock count.  */
static unsigned int
lisp_mutex_unlock_for_wait (lisp_mutex_t *mutex)
{
  unsigned int result = mutex->count;

  /* Ensured by condvar code.  */
  eassert (mutex->owner == current_thread);

  mutex->count = 0;
  mutex->owner = NULL;
  sys_cond_broadcast (&mutex->condition);

  return result;
}