EGLImageKHR S60VideoEglRendererControl::acquireEglImage(S60EglEndpoint *endpoint) const
{
    VERBOSE_TRACE("S60VideoEglRendererControl::acquireEglImage" << qtThisPtr());
    EGLImageKHR image = EGL_NO_IMAGE_KHR;
    if (m_eglEndpoint == endpoint)
        image = m_eglEndpoint->acquireImage();
    return image;
}
void S60VideoEglRendererControl::releaseEglImage(S60EglEndpoint *endpoint, qint64 delay)
{
    VERBOSE_TRACE("S60VideoEglRendererControl::releaseEglImage" << qtThisPtr()
                  << "delay" << delay);
    if (m_eglEndpoint == endpoint) {
        if (delay)
            m_eglEndpoint->setDelay(delay);
        m_eglEndpoint->releaseImage();
    }
    m_buffer = 0;
}
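
A hedged usage sketch follows (helper names hypothetical, not from the source): a consumer pairs the two calls above around one frame, acquiring the EGLImage from the endpoint, consuming it, and releasing it back with an optional presentation delay.

// Hypothetical consumer, not from the source: acquire the EGLImage for one
// frame, consume it, then hand it back to the endpoint.
void renderOneFrame(S60VideoEglRendererControl *control, S60EglEndpoint *endpoint)
{
    const EGLImageKHR image = control->acquireEglImage(endpoint);
    if (image != EGL_NO_IMAGE_KHR) {
        bindAndDraw(image); // hypothetical GL/VG consumption of the image
        control->releaseEglImage(endpoint, 0); // 0 = no extra presentation delay
    }
}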
Example #3
static void *
__mf_pthread_spawner (void *arg)
{
  struct pthread_info *pi = __mf_find_threadinfo ();
  void *result = NULL;

  /* Turn off reentrancy indications.  */
  assert (pi->state == reentrant);
  pi->state = active;

  VERBOSE_TRACE ("new user thread\n");
  
  if (__mf_opts.heur_std_data)
    {
      pi->thread_errno = & errno;
      __mf_register (pi->thread_errno, sizeof (int), 
		     __MF_TYPE_GUESS, "errno area (thread)");
      /* NB: we could use __MF_TYPE_STATIC above, but we guess that
	 the thread errno is coming out of some dynamically allocated
	 pool that we already know of as __MF_TYPE_HEAP. */
    }

  /* We considered using pthread_key_t objects instead of these
     cleanup stacks, but they were less cooperative with the
     interposed malloc hooks in libmudflap.  */
  pthread_cleanup_push (& __mf_pthread_cleanup, pi);

  /* Call user thread */
  {
    /* Extract given entry point and argument.  */
    struct pthread_start_info *psi = arg;
    void * (*user_fn)(void *) = psi->user_fn;
    void *user_arg = psi->user_arg;

    /* Signal the main thread to resume.  */
    psi->thread_info = pi;
      
    result = (*user_fn)(user_arg);
  }

  pthread_cleanup_pop (1 /* execute */);

  /* NB: there is a slight race here.  The pthread_info field will now
     say this thread is dead, but it may still be running .. right
     here.  We try to check for this possibility using the
     pthread_kill test below. */

  return result;
}
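
Neither pthread_info nor pthread_start_info is defined on this page. The sketch below is inferred from the fields the wrappers in these examples touch; exact member types, order, and any omitted members are assumptions.

#include <stddef.h>  /* size_t */

/* Sketch inferred from usage; not the verbatim libmudflap declarations.  */
enum __mf_state_enum { active, reentrant };  /* assumed enumerators */

struct pthread_info
{
  int used_p;                  /* slot in __mf_pthread_info is live */
  int dead_p;                  /* counts upward after exit to delay stack reuse */
  enum __mf_state_enum state;  /* per-thread reentrancy flag */
  int *thread_errno;           /* registered errno area, if any */
  void *stack;                 /* aligned stack pointer handed to pthreads */
  size_t stack_size;
  void *stack_alloc;           /* original mmap base, kept for munmap */
  size_t stack_size_alloc;
};

struct pthread_start_info
{
  void * (*user_fn) (void *);        /* user entry point */
  void *user_arg;                    /* its argument */
  struct pthread_info *thread_info;  /* set by the child; the parent spins on it */
};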
Example #4
static void 
__mf_pthread_cleanup (void *arg)
{
  struct pthread_info *pi = arg;

  /* XXX: This unregistration is not safe on platforms where distinct
     threads share errno (or at least its virtual address).  */
  if (pi->thread_errno != NULL)
    __mf_unregister (pi->thread_errno, sizeof (int), __MF_TYPE_GUESS);

  /* XXX: Only detached threads should designate themselves as dead
     here.  Non-detached threads are marked dead after their
     personalized pthread_join() call.  */
  pi->state = reentrant;
  pi->dead_p = 1;

  VERBOSE_TRACE ("thread pi %p exiting\n", pi);
}
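Example #5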
void S60VideoEglRendererControl::imageAvailable()
{
    const EGLenum api = currentEglApi();
    VERBOSE_TRACE("S60VideoEglRendererControl::imageAvailable" << qtThisPtr()
                  << "api" << api);
    Q_ASSERT(!m_buffer);

    if (!api)
        return;

    if (m_doesProduceEglImages) {
        m_buffer = new S60EglImageVideoBuffer(this, m_eglEndpoint);
    } else {
        switch (api) {
#ifndef QT_NO_OPENGL
        case EGL_OPENGL_ES_API:
            m_buffer = new S60GlTextureVideoBuffer(this, m_eglEndpoint, m_eglExtensions);
            break;
#endif
#ifndef QT_NO_OPENVG
        case EGL_OPENVG_API:
            m_buffer = new S60VgImageVideoBuffer(this, m_eglEndpoint, m_eglExtensions);
            break;
#endif
        }
    }
    Q_ASSERT(m_buffer);
    QVideoSurfaceFormat format(m_nativeSize,
                               QVideoFrame::Format_RGB32,
                               m_buffer->handleType());
    format.setScanLineDirection(QVideoSurfaceFormat::BottomToTop);
    if (!m_surface->isActive())
        m_surface->start(format);
    const QVideoFrame frame(m_buffer, format.frameSize(), format.pixelFormat());
    m_surface->present(frame);
#ifdef VIDEOOUTPUT_MEASURE_FRAMERATE
    m_frameRate->notify();
#endif
}
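
For context, a minimal sketch (an assumption, not part of this source) of a QAbstractVideoSurface that could accept the frames presented above; whichever S60*VideoBuffer subclass imageAvailable() picked determines the native handle the surface receives.

// Minimal consumer sketch (assumption): accepts the RGB32 format presented
// above and reads the native handle (GL texture, VGImage or EGLImage id)
// exposed by the chosen buffer subclass.
#include <QAbstractVideoSurface>

class SketchSurface : public QAbstractVideoSurface
{
public:
    QList<QVideoFrame::PixelFormat> supportedPixelFormats(
            QAbstractVideoBuffer::HandleType type = QAbstractVideoBuffer::NoHandle) const
    {
        Q_UNUSED(type);
        return QList<QVideoFrame::PixelFormat>() << QVideoFrame::Format_RGB32;
    }
    bool present(const QVideoFrame &frame)
    {
        const QVariant handle = frame.handle(); // id meaningful per frame.handleType()
        // ... hand the id to the GL/VG drawing code ...
        return handle.isValid();
    }
};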
Example #6
/* This wrapper is a little different, as it's called indirectly from
   __mf_fini also to clean up pending allocations.  */
void *
__mf_wrap_alloca_indirect (size_t c)
{
  DECLARE (void *, malloc, size_t);
  DECLARE (void, free, void *);

  /* This struct, a linked list, tracks alloca'd objects.  The newest
     object is at the head of the list.  If we detect that we've
     popped a few levels of stack, then the listed objects are freed
     as needed.  NB: The tracking struct is allocated with
     real_malloc; the user data with wrap_malloc.
  */
  struct alloca_tracking { void *ptr; void *stack; struct alloca_tracking* next; };
  static struct alloca_tracking *alloca_history = NULL;

  void *stack = __builtin_frame_address (0);
  void *result;
  struct alloca_tracking *track;

  TRACE ("%s\n", __PRETTY_FUNCTION__);
  VERBOSE_TRACE ("alloca stack level %p\n", (void *) stack);

  /* XXX: thread locking! */

  /* Free any previously alloca'd blocks that belong to deeper-nested functions,
     which must therefore have exited by now.  */

#define DEEPER_THAN < /* XXX: for x86; steal find_stack_direction() from libiberty/alloca.c */

  while (alloca_history &&
	 ((uintptr_t) alloca_history->stack DEEPER_THAN (uintptr_t) stack))
    {
      struct alloca_tracking *next = alloca_history->next;
      __mf_unregister (alloca_history->ptr, 0, __MF_TYPE_HEAP);
      BEGIN_MALLOC_PROTECT ();
      CALL_REAL (free, alloca_history->ptr);
      CALL_REAL (free, alloca_history);
      END_MALLOC_PROTECT ();
      alloca_history = next;
    }

  /* Allocate new block.  */
  result = NULL;
  if (LIKELY (c > 0)) /* alloca(0) causes no allocation.  */
    {
      BEGIN_MALLOC_PROTECT ();
      track = (struct alloca_tracking *) CALL_REAL (malloc,
						    sizeof (struct alloca_tracking));
      END_MALLOC_PROTECT ();
      if (LIKELY (track != NULL))
	{
	  BEGIN_MALLOC_PROTECT ();
	  result = CALL_REAL (malloc, c);
	  END_MALLOC_PROTECT ();
	  if (UNLIKELY (result == NULL))
	    {
	      BEGIN_MALLOC_PROTECT ();
	      CALL_REAL (free, track);
	      END_MALLOC_PROTECT ();
	      /* Too bad.  XXX: What about errno?  */
	    }
	  else
	    {
	      __mf_register (result, c, __MF_TYPE_HEAP, "alloca region");
	      track->ptr = result;
	      track->stack = stack;
	      track->next = alloca_history;
	      alloca_history = track;
	    }
	}
    }

  return result;
}
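
In libmudflap the direct alloca wrapper is, roughly, a thin shim over this indirect entry point, which is also what lets __mf_fini flush pending blocks by reaching the same code from a shallow stack frame:

#undef alloca
WRAPPER(void *, alloca, size_t c)
{
  return __mf_wrap_alloca_indirect (c);
}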
Example #7
WRAPPER(void, free, void *buf)
{
  /* Use a circular queue to delay some number (__mf_opts.free_queue_length) of free()s.  */
  static void *free_queue [__MF_FREEQ_MAX];
  static unsigned free_ptr = 0;
  static int freeq_initialized = 0;
  DECLARE(void, free, void *);

  if (UNLIKELY(buf == NULL))
    return;

  BEGIN_PROTECT (free, buf);

#if PIC
  /* Check whether the given buffer might have come from a
     __mf_0fn_malloc/calloc call that for whatever reason was not
     redirected back to __mf_0fn_free.  If so, we just ignore the
     call. */
  if (UNLIKELY((uintptr_t) buf >= (uintptr_t) __mf_0fn_bufs &&
               (uintptr_t) buf < ((uintptr_t) __mf_0fn_bufs + sizeof(__mf_0fn_bufs))))
  {
    VERBOSE_TRACE ("skipping free of boot (0fn) alloc buffer %p\n", buf);
    return;
  }
#endif

  LOCKTH ();
  if (UNLIKELY(!freeq_initialized))
    {
      memset (free_queue, 0, __MF_FREEQ_MAX * sizeof (void *));
      freeq_initialized = 1;
    }
  UNLOCKTH ();

  __mf_unregister (buf, 0, __MF_TYPE_HEAP_I);
  /* NB: underlying region may have been __MF_TYPE_HEAP. */

  if (UNLIKELY(__mf_opts.free_queue_length > 0))
    {
      char *freeme = NULL;
      LOCKTH ();
      if (free_queue [free_ptr] != NULL)
	{
	  freeme = free_queue [free_ptr];
	  freeme -= __mf_opts.crumple_zone;
	}
      free_queue [free_ptr] = buf;
      free_ptr = (free_ptr == (__mf_opts.free_queue_length-1) ? 0 : free_ptr + 1);
      UNLOCKTH ();
      if (freeme)
	{
	  if (__mf_opts.trace_mf_calls)
	    {
	      VERBOSE_TRACE ("freeing deferred pointer %p (crumple %u)\n",
			     (void *) freeme,
			     __mf_opts.crumple_zone);
	    }
	  BEGIN_MALLOC_PROTECT ();
	  CALL_REAL (free, freeme);
	  END_MALLOC_PROTECT ();
	}
    }
  else
    {
      /* back pointer up a bit to the beginning of crumple zone */
      char *base = (char *)buf;
      base -= __mf_opts.crumple_zone;
      if (__mf_opts.trace_mf_calls)
	{
	  VERBOSE_TRACE ("freeing pointer %p = %p - %u\n",
			 (void *) base,
			 (void *) buf,
			 __mf_opts.crumple_zone);
	}
      BEGIN_MALLOC_PROTECT ();
      CALL_REAL (free, base);
      END_MALLOC_PROTECT ();
    }
}
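
The crumple-zone subtraction above mirrors the allocation side: the malloc wrapper pads each request and returns an interior pointer, so free() must first back up to the real base. A paraphrased sketch of that layout (an assumption, not the verbatim wrapper):

/* Allocation-side sketch, paraphrased: why free() subtracts crumple_zone. */
static void *sketch_wrap_malloc (size_t c)
{
  DECLARE (void *, malloc, size_t);
  size_t padded = c + 2 * __mf_opts.crumple_zone;  /* zone below and above */
  char *base = (char *) CALL_REAL (malloc, padded);
  if (base == NULL)
    return NULL;
  void *result = base + __mf_opts.crumple_zone;    /* pointer the user sees */
  __mf_register (result, c, __MF_TYPE_HEAP_I, "malloc region");
  return result;
}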
Example #8
WRAPPER(void, free, void *buf)
{
  /* Use a circular queue to delay some number (__mf_opts.free_queue_length) of free()s.  */
  static void *free_queue [__MF_FREEQ_MAX];
  static unsigned free_ptr = 0;
  static int freeq_initialized = 0;
  DECLARE(void, free, void *);

  BEGIN_PROTECT (free, buf);

  if (UNLIKELY(buf == NULL))
    return;

  LOCKTH ();
  if (UNLIKELY(!freeq_initialized))
    {
      memset (free_queue, 0, __MF_FREEQ_MAX * sizeof (void *));
      freeq_initialized = 1;
    }
  UNLOCKTH ();

  __mf_unregister (buf, 0, __MF_TYPE_HEAP_I);
  /* NB: underlying region may have been __MF_TYPE_HEAP. */

  if (UNLIKELY(__mf_opts.free_queue_length > 0))
    {
      char *freeme = NULL;
      LOCKTH ();
      if (free_queue [free_ptr] != NULL)
	{
	  freeme = free_queue [free_ptr];
	  freeme -= __mf_opts.crumple_zone;
	}
      free_queue [free_ptr] = buf;
      free_ptr = (free_ptr == (__mf_opts.free_queue_length-1) ? 0 : free_ptr + 1);
      UNLOCKTH ();
      if (freeme)
	{
	  if (__mf_opts.trace_mf_calls)
	    {
	      VERBOSE_TRACE ("freeing deferred pointer %p (crumple %u)\n",
			     (void *) freeme,
			     __mf_opts.crumple_zone);
	    }
	  BEGIN_MALLOC_PROTECT ();
	  CALL_REAL (free, freeme);
	  END_MALLOC_PROTECT ();
	}
    }
  else
    {
      /* back pointer up a bit to the beginning of crumple zone */
      char *base = (char *)buf;
      base -= __mf_opts.crumple_zone;
      if (__mf_opts.trace_mf_calls)
	{
	  VERBOSE_TRACE ("freeing pointer %p = %p - %u\n",
			 (void *) base,
			 (void *) buf,
			 __mf_opts.crumple_zone);
	}
      BEGIN_MALLOC_PROTECT ();
      CALL_REAL (free, base);
      END_MALLOC_PROTECT ();
    }
}
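
DECLARE and CALL_REAL are not defined on this page. As a rough sketch of the mechanism (an assumption, not the verbatim macros): the wrapper resolves the underlying libc symbol once and dispatches through it, which is what keeps CALL_REAL (free, ...) from recursing back into the wrapper.

#include <dlfcn.h>  /* dlsym, RTLD_NEXT */

typedef void (*free_fn) (void *);

static void sketch_call_real_free (void *p)
{
  static free_fn real_free;
  if (!real_free)
    real_free = (free_fn) dlsym (RTLD_NEXT, "free");  /* skip the wrapper */
  real_free (p);
}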
Example #9
WRAPPER(int, pthread_create, pthread_t *thr, const pthread_attr_t *attr, 
	 void * (*start) (void *), void *arg)
{
  DECLARE(int, munmap, void *p, size_t l);
  DECLARE(void *, mmap, void *p, size_t l, int prot, int flags, int fd, off_t of);
  DECLARE(int, pthread_create, pthread_t *thr, const pthread_attr_t *attr, 
	  void * (*start) (void *), void *arg);
  int result;
  pthread_attr_t override_attr;
  void *override_stack;
  size_t override_stacksize;
  void *override_stack_alloc = (void *) 0;
  size_t override_stacksize_alloc = 0;
  unsigned i;

  TRACE ("pthread_create\n");

  /* Garbage-collect dead threads' stacks.  */
  LOCKTH ();
  for (i = 0; i < LIBMUDFLAPTH_THREADS_MAX; i++)
    {
      struct pthread_info *pi = & __mf_pthread_info [i];
      if (! pi->used_p)
	continue;
      if (! pi->dead_p)
	continue;

      /* VERBOSE_TRACE ("thread %u pi %p stack cleanup deferred (%u)\n",
	 (unsigned) pi->self, pi, pi->dead_p); */
	      
      /* Delay actual deallocation by a few cycles, try to discourage the
	 race mentioned at the end of __mf_pthread_spawner().  */
      if (pi->dead_p)
	pi->dead_p ++;
      if (pi->dead_p >= 10 /* XXX */)
	{
	  if (pi->stack)
	    CALL_REAL (munmap, pi->stack_alloc, pi->stack_size_alloc);

	  VERBOSE_TRACE ("slot %u freed, stack %p\n", i, pi->stack_alloc);
	  memset (pi, 0, sizeof (*pi));

	  /* One round of garbage collection is enough.  */
	  break;
	}
    }
  UNLOCKTH ();

  /* Let's allocate a stack for this thread, if one is not already
     supplied by the caller.  We don't want to let e.g. the
     linuxthreads manager thread do this allocation.  */
  if (attr != NULL)
    override_attr = *attr;
  else
    pthread_attr_init (& override_attr);

  /* Get supplied attributes, if any.  */
  /* XXX: consider using POSIX2K attr_getstack() */
  if (pthread_attr_getstackaddr (& override_attr, & override_stack) != 0 ||
      pthread_attr_getstacksize (& override_attr, & override_stacksize) != 0)
    {
      override_stack = NULL;
      override_stacksize = 0;
    }

  /* Do we need to allocate the new thread's stack?  */
  if (__mf_opts.thread_stack && override_stack == NULL)
    {
      uintptr_t alignment = 256; /* power of two */

      /* Perturb the initial stack addresses slightly, to encourage
	 threads to have nonconflicting entries in the lookup cache
	 for their tracked stack objects.  */
      static unsigned perturb = 0;
      const unsigned perturb_delta = 32;
      const unsigned perturb_count = 16;
      perturb += perturb_delta;
      if (perturb > perturb_delta*perturb_count) perturb = 0;

      /* Use glibc x86 defaults */
/* Should have been defined in <limits.h> */
#ifndef PTHREAD_STACK_MIN
#define PTHREAD_STACK_MIN 65536
#endif
      override_stacksize = max (PTHREAD_STACK_MIN, __mf_opts.thread_stack * 1024);


#if defined(MAP_ANONYMOUS)
#define MF_MAP_ANON MAP_ANONYMOUS
#elif defined(MAP_ANON)
#define MF_MAP_ANON MAP_ANON
#endif

#ifndef MAP_FAILED
#define MAP_FAILED ((void *) -1)
#endif

#ifdef MF_MAP_ANON
      override_stack = CALL_REAL (mmap, NULL, override_stacksize, 
				  PROT_READ|PROT_WRITE, 
				  MAP_PRIVATE|MF_MAP_ANON,
				  0, 0);
#else
      /* Try mapping /dev/zero instead.  */
      {
        static int zerofd = -1;
        if (zerofd == -1)
          zerofd = open ("/dev/zero", O_RDWR);
        if (zerofd == -1)
          override_stack = MAP_FAILED;
        else
          override_stack = CALL_REAL (mmap, NULL, override_stacksize, 
                                      PROT_READ|PROT_WRITE, 
                                      MAP_PRIVATE, zerofd, 0);
      }
#endif

      if (override_stack == 0 || override_stack == MAP_FAILED)
	{
	  errno = EAGAIN;
	  return -1;
	}

      VERBOSE_TRACE ("thread stack alloc %p size %lu\n", 
		     override_stack, (unsigned long) override_stacksize);

      /* Save the original allocated values for later deallocation.  */
      override_stack_alloc = override_stack;
      override_stacksize_alloc = override_stacksize;

      /* The stackaddr pthreads attribute is a candidate stack pointer.
	 It must point near the top or the bottom of this buffer, depending
	 on whether stack grows downward or upward, and suitably aligned.
	 On the x86, it grows down, so we set stackaddr near the top.  */
      /* XXX: port logic */
      override_stack = (void *)
	(((uintptr_t) override_stack + override_stacksize - alignment - perturb)
	 & (~(uintptr_t)(alignment-1)));
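      /* Worked example with hypothetical numbers: if mmap returned
         0x40000000 and override_stacksize is 0x10000 with alignment 256
         and perturb 32, then 0x40000000 + 0x10000 - 256 - 32 = 0x4000fee0,
         and masking with ~(uintptr_t)255 yields 0x4000fe00: just below the
         top of the mapping, 256-byte aligned, nudged down by the
         perturbation.  */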
      
      /* XXX: consider using POSIX2K attr_setstack() */
      if (pthread_attr_setstackaddr (& override_attr, override_stack) != 0 ||
	  pthread_attr_setstacksize (& override_attr, 
				     override_stacksize - alignment - perturb) != 0)
	{
	  /* This should not happen.  */
	  CALL_REAL (munmap, override_stack, override_stacksize);
	  errno = EAGAIN;
	  return -1;
	}
    }

  /* Actually start the child thread.  */
  {
    struct pthread_start_info psi;
    struct pthread_info *pi = NULL;
    
    /* Fill in startup-control fields.  */
    psi.user_fn = start;
    psi.user_arg = arg;
    psi.thread_info = NULL;
    
    /* Actually create the thread.  */
    __mf_state = reentrant;
    result = CALL_REAL (pthread_create, thr, & override_attr,
			& __mf_pthread_spawner, (void *) & psi);
    __mf_state = active;
    /* We also hook pthread_join/pthread_exit to get into reentrant
       mode during thread shutdown/cleanup.  */

    /* Wait until child thread has progressed far enough into its
       __mf_pthread_spawner() call.  */
    while (1) /* XXX: timeout? */
      {
	volatile struct pthread_start_info *psip = & psi;
	pi = psip->thread_info;
	if (pi != NULL) 
	  break;
	sched_yield ();
      }

    /* Fill in remaining fields in pthread_info. */
    pi->stack = override_stack;
    pi->stack_size = override_stacksize;
    pi->stack_alloc = override_stack_alloc;
    pi->stack_size_alloc = override_stacksize_alloc;
    /* XXX: this might be too late for future heuristics that attempt
       to use thread stack bounds.  We may need to put the new thread
       to sleep. */
  }


  /* May need to clean up if we created a pthread_attr_t of our own.  */
  if (attr == NULL)
    pthread_attr_destroy (& override_attr); /* NB: this shouldn't deallocate stack */

  return result;
}
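
How these WRAPPER definitions are bound into a program is not shown here. A hedged sketch of the usual static-link arrangement (an assumption about the build, using the GNU linker's symbol wrapping; shared builds instead define the symbols directly and reach libc through dlsym):

/* Assumed binding for a static link:

     gcc -fmudflapth main.c -lmudflapth -lpthread \
         -Wl,--wrap,pthread_create,--wrap,malloc,--wrap,free

   caller --> __wrap_pthread_create  (the wrapper above)
          --> CALL_REAL (pthread_create, ...) --> __real_pthread_create  */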