Example #1
ArvBuffer *
arv_stream_timeout_pop_buffer (ArvStream *stream, guint64 timeout)
{
#if GLIB_CHECK_VERSION(2,32,0)
	g_return_val_if_fail (ARV_IS_STREAM (stream), NULL);

	return g_async_queue_timeout_pop (stream->priv->output_queue, timeout);
#else
	GTimeVal end_time;

	g_return_val_if_fail (ARV_IS_STREAM (stream), NULL);

	g_get_current_time (&end_time);
	g_time_val_add (&end_time, timeout);

	return g_async_queue_timed_pop (stream->priv->output_queue, &end_time);
#endif
}
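Both branches above take the timeout in microseconds: g_async_queue_timeout_pop() (GLib >= 2.32) wants a relative value measured against the monotonic clock, while the older g_async_queue_timed_pop() wants an absolute wall-clock GTimeVal built with g_get_current_time() plus g_time_val_add(). A minimal sketch of the same split for a plain GAsyncQueue; the helper name and queue are illustrative, not part of Aravis.

#include <glib.h>

/* Pop one item, waiting at most timeout_us microseconds. */
static gpointer
pop_with_timeout (GAsyncQueue *queue, guint64 timeout_us)
{
#if GLIB_CHECK_VERSION(2,32,0)
	/* Relative timeout against the monotonic clock. */
	return g_async_queue_timeout_pop (queue, timeout_us);
#else
	GTimeVal end_time;

	/* Absolute wall-clock end time: "now" plus the offset. */
	g_get_current_time (&end_time);
	g_time_val_add (&end_time, (glong) timeout_us);
	return g_async_queue_timed_pop (queue, &end_time);
#endif
}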
Example #2
static glong
swfdec_iterate_get_msecs_to_next_event (GSource *source_)
{
  SwfdecIterateSource *source = (SwfdecIterateSource *) source_;
  GTimeVal now;
  glong diff;

  g_assert (source->player);
  diff = swfdec_player_get_next_event (source->player);
  if (diff == -1)
    return G_MAXLONG;
  diff *= source->speed;
  g_source_get_current_time (source_, &now);
  /* should really add to source->last instead of subtracting from now */
  g_time_val_add (&now, -diff * 1000);
  diff = my_time_val_difference (&source->last, &now);

  return diff;
}
Example #3
CV_IMPL int cvWaitKey( int delay )
{
#ifdef HAVE_GTHREAD
	if(thread_started && g_thread_self()!=window_thread){
		gboolean expired;
		int my_last_key;

		// wait for signal or timeout if delay > 0
		if(delay>0){
			GTimeVal timer;
			g_get_current_time(&timer);
			g_time_val_add(&timer, delay*1000);
			expired = !g_cond_timed_wait(cond_have_key, last_key_mutex, &timer);
		}
		else{
			g_cond_wait(cond_have_key, last_key_mutex);
			expired=false;
		}
		my_last_key = last_key;
		g_mutex_unlock(last_key_mutex);
		if(expired || hg_windows==0){
			return -1;
		}
		return my_last_key;
	}
	else{
#endif
		int expired = 0;
		guint timer = 0;
		if( delay > 0 )
			timer = g_timeout_add( delay, icvAlarm, &expired );
		last_key = -1;
		while( gtk_main_iteration_do(TRUE) && last_key < 0 && !expired && hg_windows != 0 )
			;

		if( delay > 0 && !expired )
			g_source_remove(timer);
#ifdef HAVE_GTHREAD
	}
#endif
	return last_key;
}
Example #4
/*
 * clutter_timeline_do_tick
 * @timeline: a #ClutterTimeline
 * @tick_time: time of advance
 *
 * Advances @timeline based on the time passed in @tick_time. This
 * function is called by the master clock. The @timeline will use this
 * interval to emit the #ClutterTimeline::new-frame signal and
 * eventually skip frames.
 */
void
clutter_timeline_do_tick (ClutterTimeline *timeline,
			  GTimeVal        *tick_time)
{
  ClutterTimelinePrivate *priv;

  g_return_if_fail (CLUTTER_IS_TIMELINE (timeline));

  priv = timeline->priv;

  if (priv->waiting_first_tick)
    {
      priv->last_frame_time = *tick_time;
      priv->waiting_first_tick = FALSE;
    }
  else
    {
      gint64 msecs;

      msecs = (tick_time->tv_sec - priv->last_frame_time.tv_sec) * 1000
            + (tick_time->tv_usec - priv->last_frame_time.tv_usec) / 1000;

      /* if the clock rolled back between ticks we need to
       * account for it; the best course of action, since the
       * clock roll back can happen by any arbitrary amount
       * of milliseconds, is to drop a frame here
       */
      if (msecs < 0)
        {
          priv->last_frame_time = *tick_time;
          return;
        }

      if (msecs != 0)
	{
	  /* Avoid accumulating error */
	  g_time_val_add (&priv->last_frame_time, msecs * 1000L);
	  priv->msecs_delta = msecs;
	  clutter_timeline_do_frame (timeline);
	}
    }
}
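The delta above is the difference of two GTimeVal samples converted to whole milliseconds; advancing last_frame_time with g_time_val_add (..., msecs * 1000) instead of copying the new sample keeps the sub-millisecond remainder for the next tick, which is what the "Avoid accumulating error" comment refers to. A small illustrative helper for that computation (not part of Clutter):

/* Elapsed whole milliseconds between two GTimeVal samples. */
static gint64
time_val_elapsed_ms (const GTimeVal *earlier, const GTimeVal *later)
{
  return (later->tv_sec - earlier->tv_sec) * 1000
       + (later->tv_usec - earlier->tv_usec) / 1000;
}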
Example #5
/**
 * oh_dequeue_session_event
 * @sid: id of the session whose event queue is read
 * @timeout: maximum wait, a SaHpiTimeoutT in nanoseconds, or
 *           SAHPI_TIMEOUT_IMMEDIATE / SAHPI_TIMEOUT_BLOCK
 * @event: caller-supplied buffer that receives the dequeued event
 *
 * Pops one event from the session's event queue, waiting at most @timeout.
 *
 * Returns: SA_OK on success; SA_ERR_HPI_TIMEOUT if no event arrived in
 * time; SA_ERR_HPI_INVALID_PARAMS or SA_ERR_HPI_INVALID_SESSION otherwise.
 **/
SaErrorT oh_dequeue_session_event(SaHpiSessionIdT sid,
                                  SaHpiTimeoutT timeout,
                                  struct oh_event *event)
{
       struct oh_session *session = NULL;
       struct oh_event *devent = NULL;
       GTimeVal gfinaltime;
       GAsyncQueue *eventq = NULL;

       if (sid < 1 || (event == NULL)) return SA_ERR_HPI_INVALID_PARAMS;

       g_static_rec_mutex_lock(&oh_sessions.lock); /* Locked session table */
       session = g_hash_table_lookup(oh_sessions.table, &sid);
       if (!session) {
               g_static_rec_mutex_unlock(&oh_sessions.lock);
               return SA_ERR_HPI_INVALID_SESSION;
       }
       eventq = session->eventq;
       g_async_queue_ref(eventq);
       g_static_rec_mutex_unlock(&oh_sessions.lock);

       if (timeout == SAHPI_TIMEOUT_IMMEDIATE) {
               devent = g_async_queue_try_pop(eventq);
       } else if (timeout == SAHPI_TIMEOUT_BLOCK) {
               devent = g_async_queue_pop(eventq); /* FIXME: Need to time this. */
       } else {
               g_get_current_time(&gfinaltime);
               g_time_val_add(&gfinaltime, (glong) (timeout / 1000));
               devent = g_async_queue_timed_pop(eventq, &gfinaltime);
       }
       g_async_queue_unref(eventq);

       if (devent) {
               memcpy(event, devent, sizeof(struct oh_event));
               g_free(devent);
               return SA_OK;
       } else {
               memset(event, 0, sizeof(struct oh_event));
               return SA_ERR_HPI_TIMEOUT;
       }
}
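The division by 1000 above converts the HPI timeout, which SaHpiTimeoutT expresses in nanoseconds, into the microseconds that g_time_val_add() expects. A sketch of just that conversion; the helper name is illustrative and the SaHpiTimeoutT typedef is assumed to come from the SAF HPI headers:

/* Build an absolute GTimeVal deadline from a nanosecond HPI timeout. */
static void
deadline_from_hpi_timeout (GTimeVal *deadline, SaHpiTimeoutT timeout_ns)
{
       g_get_current_time (deadline);
       g_time_val_add (deadline, (glong) (timeout_ns / 1000));
}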
Example #6
static gpointer oh_event_thread_loop(gpointer data)
{
        GTimeVal time;

        while(oh_run_threaded()) {
                dbg("About to run through the event loop");

                oh_get_events();

                g_get_current_time(&time);
                g_time_val_add(&time, OH_THREAD_SLEEP_TIME);
                dbg("Going to sleep");

                if (g_cond_timed_wait(oh_thread_wait, oh_thread_mutex, &time))
                        dbg("SIGNALED: Got signal from plugin");
                else
                        dbg("TIMEDOUT: Woke up, am looping again");
        }
        g_thread_exit(0);
        return 0;
}
Example #7
/**
 * Wait until no collections have changed for 10 seconds, then sync.
 * @internal
 */
static gpointer
do_loop (gpointer udata)
{
	xmms_coll_dag_t *dag = udata;
	GTimeVal time;

	g_mutex_lock (mutex);

	while (keep_running) {
		if (!want_sync) {
			g_cond_wait (cond, mutex);
		}

		/* Wait until no requests have been filed for 10 seconds. */
		while (keep_running && want_sync) {
			want_sync = FALSE;

			g_get_current_time (&time);
			g_time_val_add (&time, 10000000);

			g_cond_timed_wait (cond, mutex, &time);
		}

		if (keep_running) {
			/* The dag might be locked when calling schedule_sync, so we need to
			 * unlock to avoid deadlocks */
			g_mutex_unlock (mutex);

			XMMS_DBG ("Syncing collections to database.");
			xmms_collection_sync (dag);

			g_mutex_lock (mutex);
		}
	}

	g_mutex_unlock (mutex);

	return NULL;
}
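The same 10-second quiet-period (debounce) wait can be written against the GLib >= 2.32 condition API, which takes an absolute monotonic deadline rather than a GTimeVal. A hedged sketch, assuming the caller already holds the mutex and that the two flags behave as in the loop above:

static void
wait_for_quiet_period (GCond *cond, GMutex *mutex,
                       gboolean *keep_running, gboolean *want_sync)
{
	/* Every new request re-arms a fresh 10 s window. */
	while (*keep_running && *want_sync) {
		gint64 end_time;

		*want_sync = FALSE;
		end_time = g_get_monotonic_time () + 10 * G_TIME_SPAN_SECOND;
		g_cond_wait_until (cond, mutex, end_time);
	}
}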
Example #8
static inline void
wait_for_state (GOmxCore *core,
                OMX_STATETYPE state)
{
    GTimeVal tv;
    gboolean signaled;

    g_mutex_lock (core->omx_state_mutex);

    if (core->omx_error != OMX_ErrorNone)
        goto leave;

    g_get_current_time (&tv);
    g_time_val_add (&tv, 15 * G_USEC_PER_SEC);

    /* try once */
    if (core->omx_state != state)
    {
        signaled = g_cond_timed_wait (core->omx_state_condition, core->omx_state_mutex, &tv);

        if (!signaled)
        {
            GST_ERROR_OBJECT (core->object, "timed out switching from '%s' to '%s'",
                              omx_state_to_str(core->omx_state), omx_state_to_str(state));
        }
    }

    if (core->omx_error != OMX_ErrorNone)
        goto leave;

    if (core->omx_state != state)
    {
        GST_ERROR_OBJECT (core->object, "wrong state received: state=%d, expected=%d",
                          core->omx_state, state);
    }

leave:
    g_mutex_unlock (core->omx_state_mutex);
}
Example #9
static gboolean
tilem_anim_iter_advance(GdkPixbufAnimationIter *giter,
                        const GTimeVal *current_time)
{
	TilemAnimIter *iter = TILEM_ANIM_ITER(giter);
	int ms;

	g_return_val_if_fail(TILEM_IS_ANIM_ITER(iter), FALSE);
	g_return_val_if_fail(iter->anim != NULL, FALSE);
	g_return_val_if_fail(iter->frame != NULL, FALSE);

	ms = ((current_time->tv_usec - iter->current_time.tv_usec) / 1000
	      + (current_time->tv_sec - iter->current_time.tv_sec) * 1000);

	g_time_val_add(&iter->current_time, ms * 1000);

	ms *= iter->anim->speed;

	ms += iter->time_elapsed;
	if (ms < iter->frame->duration) {
		iter->time_elapsed = ms;
		return FALSE;
	}

	if (iter->pixbuf)
		g_object_unref(iter->pixbuf);
	iter->pixbuf = NULL;

	while (ms >= iter->frame->duration) {
		ms -= iter->frame->duration;
		if (iter->frame->next)
			iter->frame = iter->frame->next;
		else
			iter->frame = iter->anim->start;
	}

	iter->time_elapsed = ms;
	return TRUE;
}
Example #10
gboolean
egg_test_wait_until (int timeout)
{
	GTimeVal tv;
	gboolean ret;

	g_get_current_time (&tv);
	g_time_val_add (&tv, timeout * 1000);

	g_assert (wait_mutex);
	g_assert (wait_condition);
	g_mutex_lock (wait_mutex);
		g_assert (!wait_waiting);
		wait_waiting = TRUE;
		g_cond_broadcast (wait_start);
		ret = g_cond_timed_wait (wait_condition, wait_mutex, &tv);
		g_assert (wait_waiting);
		wait_waiting = FALSE;
	g_mutex_unlock (wait_mutex);

	return ret;
}
Example #11
/*
 * Monotonic time value caching hasn't been implemented yet in this glib
 * version, so we need to do extra work below to:
 * (1) avoid calling into the kernel for monotonic time too frequently;
 * (2) keep timeout handling unaffected by changes to the wall-clock time.
 */
static __inline__ void
hm_watch_init_time(HmWatch *watch)
{
    BUG_ON(!watch || !watch->conn);

    /* source may have not been attached to loop yet, so get it directly */
    g_get_current_time(&watch->next_timeout);

#ifdef USE_MONOTONIC_CLOCK
    gint mono_sec = hm_watch_get_monotonic_sec(watch);
    if (mono_sec)
    {
        watch->delta_time = watch->next_timeout.tv_sec - mono_sec;
    }
    else
    {
        watch->delta_time = MONOTIME_ERR;
    }
#endif

    g_time_val_add(&watch->next_timeout, 
        hm_connection_get_timeout(watch->conn) * 1000);
}
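The delta_time bookkeeping above exists only because this GLib version cannot hand out monotonic time cheaply. With GLib >= 2.28, g_get_monotonic_time() covers both goals directly; a sketch where next_timeout_mono is a hypothetical field, not part of the original HmWatch:

/* Keep the deadline purely in monotonic microseconds so wall-clock
 * changes cannot disturb it; next_timeout_mono is assumed. */
static void
hm_watch_init_time_monotonic (HmWatch *watch)
{
    watch->next_timeout_mono = g_get_monotonic_time ()
        + (gint64) hm_connection_get_timeout (watch->conn) * 1000;
}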
Example #12
int
schro_async_wait_locked (SchroAsync * async)
{
    GTimeVal ts;
    int ret;

    g_get_current_time (&ts);
    g_time_val_add (&ts, 1000000);
    ret = g_cond_timed_wait (async->app_cond, async->mutex, &ts);
    if (!ret) {
        int i;
        for (i = 0; i < async->n_threads; i++) {
            if (async->threads[i].busy != 0)
                break;
        }
        if (i == async->n_threads) {
            SCHRO_WARNING ("timeout.  deadlock?");
            schro_async_dump (async);
            return FALSE;
        }
    }
    return TRUE;
}
Example #13
static void
WaitForManualEvent(ManualEvent ME)
{
#if GLIB_CHECK_VERSION (2,32,0)
    gint64 end_time;
#else
    GTimeVal tv;
#endif
    multi_debug("wait for manual event locks");
#if GLIB_CHECK_VERSION (2,32,0)
    g_mutex_lock(&condMutex);
    end_time = g_get_monotonic_time() + 10 * G_TIME_SPAN_SECOND;
#else
    g_mutex_lock(condMutex);
#endif
    while (!ME->signalled) {
        multi_debug("waiting for manual event");
#if GLIB_CHECK_VERSION (2,32,0)
        if (!g_cond_wait_until(&ME->cond, &condMutex, end_time))
#else
        g_get_current_time(&tv);
        g_time_val_add(&tv, 10 * 1000 * 1000);
        if (g_cond_timed_wait(ME->cond, condMutex, &tv))
#endif
            break;
        else {
            multi_debug("still waiting for manual event");
        }
    }

#if GLIB_CHECK_VERSION (2,32,0)
    g_mutex_unlock(&condMutex);
#else
    g_mutex_unlock(condMutex);
#endif
    multi_debug("wait for manual event unlocks");
}
Example #14
/*
 * This function can be called any time when pattern-db is not processing
 * messages, but we expect the correlation timer to move forward.  It
 * doesn't need to be called absolutely regularly as it'll use the current
 * system time to determine how much time has passed since the last
 * invocation.  See the timing comment at pattern_db_process() for more
 * information.
 */
void
pattern_db_timer_tick(PatternDB *self)
{
  GTimeVal now;
  glong diff;
  PDBProcessParams process_params_p = {0};
  PDBProcessParams *process_params = &process_params_p;

  g_static_rw_lock_writer_lock(&self->lock);
  self->timer_process_params = process_params;
  cached_g_current_time(&now);
  diff = g_time_val_diff(&now, &self->last_tick);

  if (diff > 1e6)
    {
      glong diff_sec = (glong) (diff / 1e6);

      timer_wheel_set_time(self->timer_wheel, timer_wheel_get_time(self->timer_wheel) + diff_sec);
      msg_debug("Advancing patterndb current time because of timer tick",
                evt_tag_long("utc", timer_wheel_get_time(self->timer_wheel)));
      /* update last_tick, carrying over the fractional second that was not folded into this update */

      self->last_tick = now;
      g_time_val_add(&self->last_tick, - (glong)(diff - diff_sec * 1e6));
    }
  else if (diff < 0)
    {
      /* time moving backwards, this can only happen if the computer's time
       * is changed.  We don't update patterndb's idea of the time now, wait
       * another tick instead to update that instead.
       */
      self->last_tick = now;
    }
  self->timer_process_params = NULL;
  g_static_rw_lock_writer_unlock(&self->lock);
  _flush_emitted_messages(self, process_params);
}
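cached_g_current_time() and g_time_val_diff() above are syslog-ng helpers, not GLib calls. A sketch of the arithmetic such a difference helper typically performs, returning the gap in microseconds (name illustrative):

static glong
time_val_diff_usec (const GTimeVal *t1, const GTimeVal *t2)
{
  return (t1->tv_sec - t2->tv_sec) * G_USEC_PER_SEC
       + (t1->tv_usec - t2->tv_usec);
}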
Example #15
void CNewFilesBox::OnThreadProc(void)
{
	GTimeVal time;

	g_mutex_lock(m_DataMutex);
	while (!Cancelled()) {
		m_Delay = true;
		g_mutex_unlock(m_DataMutex);
		m_Checker.Check(NewFrameCB, this);
		g_mutex_lock(m_DataMutex);
		if (m_Delay && !m_StopThread) {
			if (m_State!=STATE_WAIT) {
				m_State = STATE_WAIT;
				PushMessage(EVENT_UPDATE, 0);
			}
			g_get_current_time(&time);
			g_time_val_add(&time, 3000000);
			g_cond_timed_wait(m_Cond, m_DataMutex, &time);
		}
	}
	m_State = STATE_STOP;
	PushMessage(EVENT_UPDATE, 0);
	g_mutex_unlock(m_DataMutex);
}
Example #16
static gint
_expire_specific_base(sqlx_cache_t *cache, sqlx_base_t *b, GTimeVal *now,
                      time_t grace_delay)
{
    if (now) {
        GTimeVal pivot;
        memcpy(&pivot, now, sizeof(GTimeVal));
        g_time_val_add(&pivot, grace_delay * -1000000L);
        if (gtv_bigger(&(b->last_update), &pivot))
            return 0;
    }

    /* At this point, I have the global lock, and the base is IDLE.
     * We know no one have the lock on it. So we make the base USED
     * and we get the lock on it. because we have the lock, it is
     * protected from other uses */

    EXTRA_ASSERT(b->status == SQLX_BASE_IDLE || b->status == SQLX_BASE_IDLE_HOT);
    EXTRA_ASSERT(b->count_open == 0);
    EXTRA_ASSERT(b->owner == NULL);

    /* make it used and locked by the current thread */
    b->owner = g_thread_self();
    sqlx_base_move_to_list(cache, b, SQLX_BASE_USED);

    _expire_base(cache, b);

    /* If someone is waiting on the base while it is being closed
     * (this arrives when someone tries to read it again after
     * waiting exactly the grace delay), we must notify him so it can
     * retry (and open it in another file descriptor).
     * See bug TO-HONEYCOMB-774 */
    g_cond_signal(b->cond);

    return 1;
}
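The pivot above is the current time shifted back by grace_delay seconds via a negative g_time_val_add() offset; the base only expires when last_update is not newer than that pivot. The same check as a standalone predicate, with gtv_bigger() replaced by an explicit comparison (names illustrative):

static gboolean
base_grace_elapsed (const GTimeVal *last_update, const GTimeVal *now,
                    time_t grace_delay)
{
    GTimeVal pivot = *now;

    /* pivot = now - grace_delay seconds */
    g_time_val_add (&pivot, (glong) grace_delay * -1000000L);

    /* expired only if last_update is not newer than the pivot */
    return (last_update->tv_sec < pivot.tv_sec) ||
           (last_update->tv_sec == pivot.tv_sec &&
            last_update->tv_usec <= pivot.tv_usec);
}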
Example #17
static gpointer
dropbox_command_client_thread(DropboxCommandClient *dcc) {
  struct sockaddr_un addr;
  socklen_t addr_len;
  int connection_attempts = 1;

  /* initialize address structure */
  addr.sun_family = AF_UNIX;
  g_snprintf(addr.sun_path,
	     sizeof(addr.sun_path),
	     "%s/.dropbox/command_socket",
	     g_get_home_dir());
  addr_len = sizeof(addr) - sizeof(addr.sun_path) + strlen(addr.sun_path);

  while (1) {
    GIOChannel *chan = NULL;
    GError *gerr = NULL;
    int sock;
    gboolean failflag = TRUE;

    do {
      int flags;

      if (0 > (sock = socket(PF_UNIX, SOCK_STREAM, 0))) {
	/* WTF */
	break;
      }

      /* set timeout on socket, to protect against
	 bad servers */
      {
	struct timeval tv = {3, 0};
	if (0 > setsockopt(sock, SOL_SOCKET, SO_RCVTIMEO,
			   &tv, sizeof(struct timeval)) ||
	    0 > setsockopt(sock, SOL_SOCKET, SO_SNDTIMEO,
			   &tv, sizeof(struct timeval))) {
	  /* debug("setsockopt failed"); */
	  break;
	}
      }

      /* set native non-blocking, for connect timeout */
      {
	if ((flags = fcntl(sock, F_GETFL, 0)) < 0 ||
	    fcntl(sock, F_SETFL, flags | O_NONBLOCK) < 0) {
	  /* debug("fcntl failed"); */
	  break;
	}
      }

      /* if there was an error we have to try again later */
      if (connect(sock, (struct sockaddr *) &addr, addr_len) < 0) {
	if (errno == EINPROGRESS) {
	  fd_set writers;
	  struct timeval tv = {1, 0};

	  FD_ZERO(&writers);
	  FD_SET(sock, &writers);

	  /* if nothing was ready after 1 second, fail out */
	  if (select(sock+1, NULL, &writers, NULL, &tv) == 0) {
	    /* debug("connection timeout"); */
	    break;
	  }

	  if (connect(sock, (struct sockaddr *) &addr, addr_len) < 0) {
	    /*	    debug("couldn't connect to command server after 1 second"); */
	    break;
	  }
	}
	/* errno != EINPROGRESS */
	else {
	  /*	  debug("bad connection"); */
	  break;
	}
      }

      /* set back to blocking */
      if (fcntl(sock, F_SETFL, flags) < 0) {
	/* debug("fcntl2 failed"); */
	break;
      }

      failflag = FALSE;
    } while (0);

    if (failflag) {
      ConnectionAttempt *ca = g_new(ConnectionAttempt, 1);
      ca->dcc = dcc;
      ca->connect_attempt = connection_attempts;
      g_idle_add((GSourceFunc) on_connection_attempt, ca);
      if (sock >= 0) {
	close(sock);
      }
      g_usleep(G_USEC_PER_SEC);
      connection_attempts++;
      continue;
    }
    else {
      connection_attempts = 0;
    }

    /* connected */
    debug("command client connected");

    chan = g_io_channel_unix_new(sock);
    g_io_channel_set_close_on_unref(chan, TRUE);
    g_io_channel_set_line_term(chan, "\n", -1);

#define SET_CONNECTED_STATE(s)     {			\
      g_mutex_lock(dcc->command_connected_mutex);	\
      dcc->command_connected = s;			\
      g_mutex_unlock(dcc->command_connected_mutex);	\
    }

    SET_CONNECTED_STATE(TRUE);

    g_idle_add((GSourceFunc) on_connect, dcc);

    while (1) {
      DropboxCommand *dc;

      while (1) {
	GTimeVal gtv;

	g_get_current_time(&gtv);
	g_time_val_add(&gtv, G_USEC_PER_SEC / 10);
	/* get a request from caja */
	dc = g_async_queue_timed_pop(dcc->command_queue, &gtv);
	if (dc != NULL) {
	  break;
	}
	else {
	  if (check_connection(chan) == FALSE) {
	    goto BADCONNECTION;
	  }
	}
      }

      /* this pointer should be unique */
      if ((gpointer (*)(DropboxCommandClient *data)) dc == &dropbox_command_client_thread) {
	debug("got a reset request");
	goto BADCONNECTION;
      }

      switch (dc->request_type) {
      case GET_FILE_INFO: {
	debug("doing file info command");
	do_file_info_command(chan, (DropboxFileInfoCommand *) dc, &gerr);
      }
	break;
      case GENERAL_COMMAND: {
	debug("doing general command");
	do_general_command(chan, (DropboxGeneralCommand *) dc, &gerr);
      }
	break;
      default: 
	g_assert_not_reached();
	break;
      }
      
      debug("done.");

      if (gerr != NULL) {
	//	debug("COMMAND ERROR*****************************");
	/* mark this request as never to be completed */
	end_request(dc);

	debug("command error: %s", gerr->message);
	
	g_error_free(gerr);
      BADCONNECTION:
	/* grab all the rest of the data off the async queue and mark it
	   never to be completed, who knows how long we'll be disconnected */
	while ((dc = g_async_queue_try_pop(dcc->command_queue)) != NULL) {
	  end_request(dc);
	}

	g_io_channel_unref(chan);

	SET_CONNECTED_STATE(FALSE);

	/* call the disconnect handler */
	g_idle_add((GSourceFunc) on_disconnect, dcc);

	break;
      }
    }
    
#undef SET_CONNECTED_STATE
  }
  
  return NULL;
}
Example #18
static
int
UNSAFE_resolver_direct_reload (struct resolver_direct_s *r, gboolean locked, GError **err)
{
	int rc = 0;

	DEBUG("META0 cache reload wanted");
	gscstat_tags_start(GSCSTAT_SERVICE_META0, GSCSTAT_TAGS_REQPROCTIME);
	/*sanity checks*/
	if (!r)
	{
		GSETERROR(err, "invalid parameter");
		goto exit_label;
	}

	/*start a critical section to access the state of the resolver*/
	if (!locked)
		M0CACHE_LOCK(*r);
	
	if (r->refresh_pending)
	{
		GTimeVal gtv;
		
		DEBUG("META0 cache already being refreshed");

		g_get_current_time (&gtv);
		g_time_val_add (&gtv, COND_MAXWAIT_MS * 1000);

		if (g_cond_timed_wait(r->refresh_condition, r->use_mutex, &gtv)) {
			/* when the signal arrives and g_cond_timed_wait() returns, the mutex is locked */
			if (!locked) M0CACHE_UNLOCK(*r);
			rc = 1;
		} else {
			/*timeout*/
			if (!locked) M0CACHE_UNLOCK(*r);
			GSETERROR(err,"timeout on a pending refresh");
			rc = 0;
		}
	}
	else
	{
		GPtrArray *newMappings=NULL;
		
		/*mark the resolver as being refreshed and leave the critical section*/
		r->refresh_pending = TRUE;

		if (!locked)
			M0CACHE_UNLOCK(*r);

		/*contact meta0 and build a reference */
		newMappings = build_meta0_cache (r, err);
		
		if (!locked)
			M0CACHE_LOCK(*r);
		
		if (!newMappings) {
			/*refresh error*/
			ERROR("Cannot refresh the META0 cache");
			rc=0;
		} else {
			if ( r->mappings )
			{
				g_ptr_array_foreach (r->mappings, _clean_cache_entry, NULL);
				g_ptr_array_free (r->mappings, TRUE);
				r->mappings = NULL;
			}
			r->mappings = newMappings;
			DEBUG("META0 cache has been refreshed");
			rc=1;
		}

		/* refresh done, change the state in a critical section and wake up
		 * all the threads waiting on the condition. */
		r->refresh_pending = FALSE;	
		g_cond_broadcast (r->refresh_condition);
		
		if (!locked)
			M0CACHE_UNLOCK(*r);
	}

exit_label:	

	gscstat_tags_end(GSCSTAT_SERVICE_META0, GSCSTAT_TAGS_REQPROCTIME);
	return rc;

}
Example #19
void jcr_queue_deliver(void *a) {
  extern jcr_instance jcr;
  GIOStatus rc = G_IO_STATUS_NORMAL;
  GString *buffer;
  gsize bytes;
  int left, len, pkts;
  dpacket d;
  GTimeVal timeout;

  int buf_size = j_atoi(xmlnode_get_data(xmlnode_get_tag(jcr->config,"send-buffer")), 8192);

  log_warn(JDBG, "packet delivery thread starting.");
  buffer = g_string_new(NULL);
  while(TRUE) {

    g_string_set_size(buffer, 0);
    pkts = 0;
    g_get_current_time(&timeout);
    g_time_val_add(&timeout, (5 * G_USEC_PER_SEC));
    d = (dpacket)g_async_queue_timed_pop(jcr->dqueue, &timeout);

    if (d == NULL) {
      if (jcr->stream_state == _STREAM_CONNECTED)
         continue;
      else
         break;
    }

    g_string_append(buffer, xmlnode2str(d->x));
    xmlnode_free(d->x);
    d = NULL;
    left = len = buffer->len;
    pkts++;

    while ((g_async_queue_length(jcr->dqueue) > 0) && (buffer->len < buf_size)) {
      d = (dpacket)g_async_queue_pop(jcr->dqueue);
      g_string_append(buffer, xmlnode2str(d->x));
      xmlnode_free(d->x);
      d = NULL;
      left = len = buffer->len;
      pkts++;
    }

    //   log_debug(JDBG, "%d '%s'", len, buf);

    while ((left > 0) && (rc == G_IO_STATUS_NORMAL)) {
      rc = g_io_channel_write_chars(jcr->gio, (buffer->str+(len - left)), left, &bytes, NULL);
      left = left - bytes;

      if (rc != G_IO_STATUS_NORMAL) {
        log_warn(JDBG, "Send packet failed, dropping packet");
      }

      log_debug(JDBG, "wrote %d packets of %d bytes", pkts, bytes);
      //    fprintf(stderr, "wrote %d packets of %d bytes\n", pkts, bytes);
      if (left==0){
        //queue is empty, flushing the socket
        g_io_channel_flush(jcr->gio, NULL);
      }
    }
  }
  log_warn(JDBG, "packet delivery thread exiting.");
  log_warn(JDBG, "  Last DvryQ Buffer='%.*s'", buffer->len, buffer->str);
  g_string_free(buffer, TRUE);
}
Example #20
/**
 * gst_bus_timed_pop_filtered:
 * @bus: a #GstBus to pop from
 * @timeout: a timeout in nanoseconds, or GST_CLOCK_TIME_NONE to wait forever
 * @types: message types to take into account, GST_MESSAGE_ANY for any type
 *
 * Get a message from the bus whose type matches the message type mask @types,
 * waiting up to the specified timeout (and discarding any messages that do not
 * match the mask provided).
 *
 * If @timeout is 0, this function behaves like gst_bus_pop_filtered(). If
 * @timeout is #GST_CLOCK_TIME_NONE, this function will block forever until a
 * matching message is posted on the bus.
 *
 * Returns: a #GstMessage matching the filter in @types, or NULL if no matching
 * message was found on the bus until the timeout expired.
 * The message is taken from the bus and needs to be unreffed with
 * gst_message_unref() after usage.
 *
 * MT safe.
 *
 * Since: 0.10.15
 */
GstMessage *
gst_bus_timed_pop_filtered (GstBus * bus, GstClockTime timeout,
    GstMessageType types)
{
  GstMessage *message;
  GTimeVal *timeval, abstimeout;
  gboolean first_round = TRUE;

  g_return_val_if_fail (GST_IS_BUS (bus), NULL);
  g_return_val_if_fail (types != 0, NULL);

  g_mutex_lock (bus->queue_lock);

  while (TRUE) {
    GST_LOG_OBJECT (bus, "have %d messages", g_queue_get_length (bus->queue));

    while ((message = g_queue_pop_head (bus->queue))) {
      GST_DEBUG_OBJECT (bus, "got message %p, %s, type mask is %u",
          message, GST_MESSAGE_TYPE_NAME (message), (guint) types);
      if ((GST_MESSAGE_TYPE (message) & types) != 0) {
        /* exit the loop, we have a message */
        goto beach;
      } else {
        GST_DEBUG_OBJECT (bus, "discarding message, does not match mask");
        gst_message_unref (message);
        message = NULL;
      }
    }

    /* no need to wait, exit loop */
    if (timeout == 0)
      break;

    if (timeout == GST_CLOCK_TIME_NONE) {
      /* wait forever */
      timeval = NULL;
    } else if (first_round) {
      glong add = timeout / 1000;

      if (add == 0)
        /* no need to wait */
        break;

      /* make timeout absolute */
      g_get_current_time (&abstimeout);
      g_time_val_add (&abstimeout, add);
      timeval = &abstimeout;
      first_round = FALSE;
      GST_DEBUG_OBJECT (bus, "blocking for message, timeout %ld", add);
    } else {
      /* calculated the absolute end time already, no need to do it again */
      GST_DEBUG_OBJECT (bus, "blocking for message, again");
      timeval = &abstimeout;    /* fool compiler */
    }
    if (!g_cond_timed_wait (bus->priv->queue_cond, bus->queue_lock, timeval)) {
      GST_INFO_OBJECT (bus, "timed out, breaking loop");
      break;
    } else {
      GST_INFO_OBJECT (bus, "we got woken up, recheck for message");
    }
  }

beach:

  g_mutex_unlock (bus->queue_lock);

  return message;
}
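A typical caller of the function documented above passes a mask of the message types it cares about; for example, blocking for up to five seconds until either end-of-stream or an error is posted. A small usage sketch (the pipeline element is assumed to exist):

static void
wait_for_eos_or_error (GstElement *pipeline)
{
  GstBus *bus = gst_element_get_bus (pipeline);
  GstMessage *msg;

  /* NULL means the five-second timeout expired with no matching message. */
  msg = gst_bus_timed_pop_filtered (bus, 5 * GST_SECOND,
      GST_MESSAGE_EOS | GST_MESSAGE_ERROR);
  if (msg != NULL)
    gst_message_unref (msg);

  gst_object_unref (bus);
}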
Example #21
/**
 * afsql_dd_database_thread:
 *
 * This is the thread inserting records into the database.
 **/
static gpointer
afsql_dd_database_thread(gpointer arg)
{
  AFSqlDestDriver *self = (AFSqlDestDriver *) arg;

  msg_verbose("Database thread started",
              evt_tag_str("driver", self->super.super.id),
              NULL);
  while (!self->db_thread_terminate)
    {
      g_mutex_lock(self->db_thread_mutex);
      if (self->db_thread_suspended)
        {
          /* we got suspended, probably because of a connection error,
           * during this time we only get wakeups if we need to be
           * terminated. */
          if (!self->db_thread_terminate)
            g_cond_timed_wait(self->db_thread_wakeup_cond, self->db_thread_mutex, &self->db_thread_suspend_target);
          self->db_thread_suspended = FALSE;
          g_mutex_unlock(self->db_thread_mutex);

          /* we loop back to check if the thread was requested to terminate */
        }
      else if (log_queue_get_length(self->queue) == 0)
        {
          /* we have nothing to INSERT into the database, let's wait we get some new stuff */

          if (self->flush_lines_queued > 0 && self->flush_timeout > 0)
            {
              GTimeVal flush_target;

              g_get_current_time(&flush_target);
              g_time_val_add(&flush_target, self->flush_timeout * 1000);
              if (!self->db_thread_terminate && !g_cond_timed_wait(self->db_thread_wakeup_cond, self->db_thread_mutex, &flush_target))
                {
                  /* timeout elapsed */
                  if (!afsql_dd_commit_txn(self, FALSE))
                    {
                      afsql_dd_disconnect(self);
                      afsql_dd_suspend(self);
                      g_mutex_unlock(self->db_thread_mutex);
                      continue;
                    }
                }
            }
          else if (!self->db_thread_terminate)
            {
              g_cond_wait(self->db_thread_wakeup_cond, self->db_thread_mutex);
            }
          g_mutex_unlock(self->db_thread_mutex);

          /* we loop back to check if the thread was requested to terminate */
        }
      else
        g_mutex_unlock(self->db_thread_mutex);

      if (self->db_thread_terminate)
        break;

      if (!afsql_dd_insert_db(self))
        {
          afsql_dd_disconnect(self);
          afsql_dd_suspend(self);
        }
    }
  if (self->flush_lines_queued > 0)
    {
      /* we can't do anything with the return value here. if commit isn't
       * successful, we get our backlog back, but we have no chance
       * submitting that back to the SQL engine.
       */

      afsql_dd_commit_txn(self, TRUE);
    }

  afsql_dd_disconnect(self);

  msg_verbose("Database thread finished",
              evt_tag_str("driver", self->super.super.id),
              NULL);
  return NULL;
}
Example #22
static gboolean
_audio_stream_change_format (AudioStreamID stream_id,
    AudioStreamBasicDescription format)
{
  OSStatus status = noErr;
  gint i;
  gboolean ret = FALSE;
  AudioStreamBasicDescription cformat;
  PropertyMutex prop_mutex;

  AudioObjectPropertyAddress formatAddress = {
    kAudioStreamPropertyPhysicalFormat,
    kAudioObjectPropertyScopeGlobal,
    kAudioObjectPropertyElementMaster
  };

  GST_DEBUG ("setting stream format: " CORE_AUDIO_FORMAT,
      CORE_AUDIO_FORMAT_ARGS (format));

  /* Condition because SetProperty is asynchronous */
  g_mutex_init (&prop_mutex.lock);
  g_cond_init (&prop_mutex.cond);

  g_mutex_lock (&prop_mutex.lock);

  /* Install the property listener to serialize the operations */
  status = AudioObjectAddPropertyListener (stream_id, &formatAddress,
      _audio_stream_format_listener, (void *) &prop_mutex);
  if (status != noErr) {
    GST_ERROR ("AudioObjectAddPropertyListener failed: %d", (int) status);
    goto done;
  }

  /* Change the format */
  if (!_audio_stream_set_current_format (stream_id, format)) {
    goto done;
  }

  /* The AudioObjectSetProperty is not only asynchronous
   * it is also not atomic in its behaviour.
   * Therefore we check 4 times before we really give up. */
  for (i = 0; i < 4; i++) {
    gint64 end_time;

    /* g_cond_wait_until() takes an absolute monotonic time in microseconds,
     * not a GTimeVal or its tv_sec field. */
    end_time = g_get_monotonic_time () + 250 * G_TIME_SPAN_MILLISECOND;

    if (!g_cond_wait_until (&prop_mutex.cond, &prop_mutex.lock, end_time)) {
      GST_LOG ("timeout...");
    }

    if (_audio_stream_get_current_format (stream_id, &cformat)) {
      GST_DEBUG ("current stream format: " CORE_AUDIO_FORMAT,
          CORE_AUDIO_FORMAT_ARGS (cformat));

      if (cformat.mSampleRate == format.mSampleRate &&
          cformat.mFormatID == format.mFormatID &&
          cformat.mFramesPerPacket == format.mFramesPerPacket) {
        /* The right format is now active */
        break;
      }
    }
  }

  if (cformat.mSampleRate != format.mSampleRate ||
      cformat.mFormatID != format.mFormatID ||
      cformat.mFramesPerPacket != format.mFramesPerPacket) {
    goto done;
  }

  ret = TRUE;

done:
  /* Removing the property listener */
  status = AudioObjectRemovePropertyListener (stream_id,
      &formatAddress, _audio_stream_format_listener, (void *) &prop_mutex);
  if (status != noErr) {
    GST_ERROR ("AudioObjectRemovePropertyListener failed: %d", (int) status);
  }
  /* Destroy the lock and condition */
  g_mutex_unlock (&prop_mutex.lock);
  g_mutex_clear (&prop_mutex.lock);
  g_cond_clear (&prop_mutex.cond);

  return ret;
}
Example #23
static GRealThreadPool*
g_thread_pool_wait_for_new_pool (void)
{
  GRealThreadPool *pool;
  gint local_wakeup_thread_serial;
  guint local_max_unused_threads;
  gint local_max_idle_time;
  gint last_wakeup_thread_serial;
  gboolean have_relayed_thread_marker = FALSE;

  local_max_unused_threads = g_atomic_int_get (&max_unused_threads);
  local_max_idle_time = g_atomic_int_get (&max_idle_time);
  last_wakeup_thread_serial = g_atomic_int_get (&wakeup_thread_serial);

  g_atomic_int_inc (&unused_threads);

  do
    {
      if (g_atomic_int_get (&unused_threads) >= local_max_unused_threads)
	{
	  /* If this is a superfluous thread, stop it. */
	  pool = NULL;
	}
      else if (local_max_idle_time > 0)
	{
	  /* If a maximal idle time is given, wait for the given time. */
	  GTimeVal end_time;

	  g_get_current_time (&end_time);
	  g_time_val_add (&end_time, local_max_idle_time * 1000);

	  DEBUG_MSG (("thread %p waiting in global pool for %f seconds.",
		      g_thread_self (), local_max_idle_time / 1000.0));

	  pool = g_async_queue_timed_pop (unused_thread_queue, &end_time);
	}
      else
	{
	  /* If no maximal idle time is given, wait indefinitely. */
	  DEBUG_MSG (("thread %p waiting in global pool.",
		      g_thread_self ()));
	  pool = g_async_queue_pop (unused_thread_queue);
	}

      if (pool == wakeup_thread_marker)
	{
	  local_wakeup_thread_serial = g_atomic_int_get (&wakeup_thread_serial);
	  if (last_wakeup_thread_serial == local_wakeup_thread_serial)
	    {
	      if (!have_relayed_thread_marker)
	      {
		/* If this wakeup marker has been received for
		 * the second time, relay it. 
		 */
		DEBUG_MSG (("thread %p relaying wakeup message to "
			    "waiting thread with lower serial.",
			    g_thread_self ()));

		g_async_queue_push (unused_thread_queue, wakeup_thread_marker);
		have_relayed_thread_marker = TRUE;

		/* If a wakeup marker has been relayed, this thread
		 * will get out of the way for 100 microseconds to
		 * avoid receiving this marker again. */
		g_usleep (100);
	      }
	    }
	  else
	    {
	      if (g_atomic_int_exchange_and_add (&kill_unused_threads, -1) > 0)
	        {
		  pool = NULL;
		  break;
		}

	      DEBUG_MSG (("thread %p updating to new limits.",
			  g_thread_self ()));

	      local_max_unused_threads = g_atomic_int_get (&max_unused_threads);
	      local_max_idle_time = g_atomic_int_get (&max_idle_time);
	      last_wakeup_thread_serial = local_wakeup_thread_serial;

	      have_relayed_thread_marker = FALSE;
	    }
	}
    }
  while (pool == wakeup_thread_marker);

  g_atomic_int_add (&unused_threads, -1);

  return pool;
}
Example #24
static void
swfdec_iterate_source_advance_cb (SwfdecPlayer *player, guint msecs, 
    guint audio_frames, SwfdecIterateSource *source)
{
  g_time_val_add (&source->last, msecs * 1000 * source->speed);
}
Example #25
static gboolean
gst_hls_demux_cache_fragments (GstHLSDemux * demux)
{
  gint i;

  /* Start parsing the main playlist */
  gst_m3u8_client_set_current (demux->client, demux->client->main);

  if (gst_m3u8_client_is_live (demux->client)) {
    if (!gst_hls_demux_update_playlist (demux, FALSE)) {
      GST_ERROR_OBJECT (demux, "Could not fetch the main playlist %s",
          demux->client->main->uri);
      return FALSE;
    }
  }

  /* If this playlist is a variant playlist, select the first one
   * and update it */
  if (gst_m3u8_client_has_variant_playlist (demux->client)) {
    GstM3U8 *child = demux->client->main->current_variant->data;
    gst_m3u8_client_set_current (demux->client, child);
    if (!gst_hls_demux_update_playlist (demux, FALSE)) {
      GST_ERROR_OBJECT (demux, "Could not fetch the child playlist %s",
          child->uri);
      return FALSE;
    }
  }

  /* If it's a live source, set the sequence number to the end of the list
   * and subtract the 'fragments_cache' to start from the last fragment */
  if (gst_m3u8_client_is_live (demux->client)) {
    demux->client->sequence += g_list_length (demux->client->current->files);
    if (demux->client->sequence >= demux->fragments_cache)
      demux->client->sequence -= demux->fragments_cache;
    else
      demux->client->sequence = 0;
  }

  /* Cache the first fragments */
  for (i = 0; i < demux->fragments_cache; i++) {
    gst_element_post_message (GST_ELEMENT (demux),
        gst_message_new_buffering (GST_OBJECT (demux),
            100 * i / demux->fragments_cache));
    g_get_current_time (&demux->next_update);
    g_time_val_add (&demux->next_update,
        demux->client->current->targetduration * 1000000);
    if (!gst_hls_demux_get_next_fragment (demux, FALSE)) {
      if (!demux->cancelled)
        GST_ERROR_OBJECT (demux, "Error caching the first fragments");
      return FALSE;
    }
    /* make sure we stop caching fragments if something cancelled it */
    if (demux->cancelled)
      return FALSE;
    gst_hls_demux_switch_playlist (demux);
  }
  gst_element_post_message (GST_ELEMENT (demux),
      gst_message_new_buffering (GST_OBJECT (demux), 100));

  g_get_current_time (&demux->next_update);

  demux->need_cache = FALSE;
  return TRUE;
}
Example #26
static gpointer
rejilla_async_task_manager_thread (RejillaAsyncTaskManager *self)
{
	gboolean result;
	GCancellable *cancel;
	RejillaAsyncTaskCtx *ctx;

	cancel = g_cancellable_new ();

	g_mutex_lock (self->priv->lock);

	while (1) {
		RejillaAsyncTaskResult res;

		/* say we are unused */
		self->priv->unused_threads ++;
	
		/* see if a task is waiting to be executed */
		while (!self->priv->waiting_tasks) {
			if (self->priv->cancelled)
				goto end;

			/* we always keep one thread ready */
			if (self->priv->num_threads - self->priv->unused_threads > 0) {
				GTimeVal timeout;

				/* wait to be woken up for 5 sec, otherwise quit */
				g_get_current_time (&timeout);
				g_time_val_add (&timeout, 5000000);
				result = g_cond_timed_wait (self->priv->new_task,
							    self->priv->lock,
							    &timeout);

				if (!result)
					goto end;
			}
			else
				g_cond_wait (self->priv->new_task,
					     self->priv->lock);
		}
	
		/* say that we are active again */
		self->priv->unused_threads --;
	
		/* get the data from the list */
		ctx = self->priv->waiting_tasks->data;
		ctx->cancel = cancel;
		ctx->priority &= ~REJILLA_ASYNC_RESCHEDULE;

		self->priv->waiting_tasks = g_slist_remove (self->priv->waiting_tasks, ctx);
		self->priv->active_tasks = g_slist_prepend (self->priv->active_tasks, ctx);
	
		g_mutex_unlock (self->priv->lock);
		res = ctx->type->thread (self, cancel, ctx->data);
		g_mutex_lock (self->priv->lock);

		/* we remove the task from the list and signal it is finished */
		self->priv->active_tasks = g_slist_remove (self->priv->active_tasks, ctx);
		g_cond_signal (self->priv->task_finished);

		/* NOTE: when threads are cancelled then they are destroyed in
		 * the function that cancelled them to destroy callback_data in
		 * the active main loop */
		if (!g_cancellable_is_cancelled (cancel)) {
			if (res == REJILLA_ASYNC_TASK_RESCHEDULE) {
				if (self->priv->waiting_tasks) {
					RejillaAsyncTaskCtx *next;

					next = self->priv->waiting_tasks->data;
					if (next->priority > ctx->priority)
						rejilla_async_task_manager_insert_task (self, ctx);
					else
						self->priv->waiting_tasks = g_slist_prepend (self->priv->waiting_tasks, ctx);
				}
				else
					self->priv->waiting_tasks = g_slist_prepend (self->priv->waiting_tasks, ctx);
			}
			else {
				if (ctx->type->destroy)
					ctx->type->destroy (self, FALSE, ctx->data);
				g_free (ctx);
			}
		}
		else
			g_cancellable_reset (cancel);
	}

end:

	self->priv->unused_threads --;
	self->priv->num_threads --;

	/* maybe finalize is waiting for us to terminate */
	g_cond_signal (self->priv->thread_finished);
	g_mutex_unlock (self->priv->lock);

	g_object_unref (cancel);

	g_thread_exit (NULL);

	return NULL;
}
Example #27
/* Called with the write_mutex held */
static void
gst_sunaudio_sink_do_delay (GstSunAudioSink * sink)
{
  GstBaseAudioSink *ba_sink = GST_BASE_AUDIO_SINK (sink);
  GstClockTime total_sleep;
  GstClockTime max_sleep;
  gint sleep_usecs;
  GTimeVal sleep_end;
  gint err;
  audio_info_t ainfo;
  guint diff;

  /* This code below ensures that we don't race any further than buffer_time 
   * ahead of the audio output, by sleeping if the next write call would cause
   * us to advance too far in the ring-buffer */
  LOOP_WHILE_EINTR (err, ioctl (sink->fd, AUDIO_GETINFO, &ainfo));
  if (err < 0)
    goto write_error;

  /* Compute our offset from the output (copes with overflow) */
  diff = (guint) (sink->segs_written) - ainfo.play.eof;
  if (diff > sink->segtotal) {
    /* This implies that reset did a flush just as the sound device acquired
     * some buffers internally, and it causes us to be out of sync with the
     * eof measure. This corrects it */
    sink->segs_written = ainfo.play.eof;
    diff = 0;
  }

  if (diff + 1 < sink->segtotal)
    return;                     /* no need to sleep at all */

  /* Never sleep longer than the initial number of undrained segments in the 
     device plus one */
  total_sleep = 0;
  max_sleep = (diff + 1) * (ba_sink->latency_time * GST_USECOND);
  /* sleep for a segment period between .eof polls */
  sleep_usecs = ba_sink->latency_time;

  /* Current time is our reference point */
  g_get_current_time (&sleep_end);

  /* If the next segment would take us too far along the ring buffer,
   * sleep for a bit to free up a slot. If there were a way to find out
   * when the eof field actually increments, we could use it, but the only
   * notification mechanism seems to be SIGPOLL, which we can't use from
   * a support library */
  while (diff + 1 >= sink->segtotal && total_sleep < max_sleep) {
    GST_LOG_OBJECT (sink, "need to block to drain segment(s). "
        "Sleeping for %d us", sleep_usecs);

    g_time_val_add (&sleep_end, sleep_usecs);

    if (g_cond_timed_wait (sink->sleep_cond, sink->write_mutex, &sleep_end)) {
      GST_LOG_OBJECT (sink, "Waking up early due to reset");
      return;                   /* Got told to wake up */
    }
    total_sleep += (sleep_usecs * GST_USECOND);

    LOOP_WHILE_EINTR (err, ioctl (sink->fd, AUDIO_GETINFO, &ainfo));
    if (err < 0)
      goto write_error;

    /* Compute our (new) offset from the output (copes with overflow) */
    diff = (guint) g_atomic_int_get (&sink->segs_written) - ainfo.play.eof;
  }

  return;

write_error:
  GST_ELEMENT_ERROR (sink, RESOURCE, OPEN_WRITE, (NULL),
      ("Playback error on device '%s': %s", sink->device, strerror (errno)));
  return;
}
Example #28
static int
vips_sequential_generate( VipsRegion *or, 
	void *seq, void *a, void *b, gboolean *stop )
{
	VipsSequential *sequential = (VipsSequential *) b;
	VipsObjectClass *class = VIPS_OBJECT_GET_CLASS( sequential );
        VipsRect *r = &or->valid;
	VipsRegion *ir = (VipsRegion *) seq;

	VIPS_DEBUG_MSG_GREEN( "thread %p request for line %d, height %d\n", 
		g_thread_self(), r->top, r->height );

	if( sequential->trace )
		vips_info( class->nickname, 
			"request for line %d, height %d", 
			r->top, r->height );

	VIPS_GATE_START( "vips_sequential_generate: wait" );

	g_mutex_lock( sequential->lock );

	VIPS_GATE_STOP( "vips_sequential_generate: wait" );

	VIPS_DEBUG_MSG_GREEN( "thread %p has lock ...\n", g_thread_self() ); 

	/* If we've seen an error, everything must stop.
	 */
	if( sequential->error ) {
		g_mutex_unlock( sequential->lock );
		return( -1 );
	}

	if( r->top > sequential->y_pos &&
		sequential->y_pos > 0 ) {
		/* This request is for stuff beyond the current read position, 
		 * and this is not the first request. We 
		 * stall for a while to give other threads time to catch up.
		 * 
		 * The stall can be cancelled by a signal on @ready.
		 *
		 * We don't stall forever, since an error would be better than
		 * deadlock, and we don't fail on timeout, since the timeout 
		 * may be harmless.
		 */

#ifdef HAVE_COND_INIT
		gint64 time;

		time = g_get_monotonic_time() + 
			STALL_TIME * G_TIME_SPAN_SECOND;
#else
		GTimeVal time;

		g_get_current_time( &time );
		g_time_val_add( &time, STALL_TIME * 1000000 );
#endif

		VIPS_DEBUG_MSG_GREEN( "thread %p stalling for up to %gs ...\n", 
			g_thread_self(), STALL_TIME ); 

		VIPS_GATE_START( "vips_sequential_generate: wait" );

		/* Exit the loop on timeout or condition passes. We have to
		 * be wary of spurious wakeups. 
		 */
		while( r->top > sequential->y_pos ) {
#ifdef HAVE_COND_INIT
			if( !g_cond_wait_until( sequential->ready, 
				sequential->lock, time ) )
				break;
#else
			if( !g_cond_timed_wait( sequential->ready, 
				sequential->lock, &time ) )
				break;
#endif

			/* We may have woken up because of an eval error.
			 */
			if( sequential->error ) {
				g_mutex_unlock( sequential->lock );
				return( -1 );
			}
		}

		VIPS_GATE_STOP( "vips_sequential_generate: wait" );

		VIPS_DEBUG_MSG_GREEN( "thread %p awake again ...\n", 
			g_thread_self() ); 
	}

	if( r->top > sequential->y_pos ) {
		/* This is a request for something some way down the image, 
		 * and we've fallen through from the stall above. 
		 *
		 * Probably the operation is something like extract_area and 
		 * we should skip the initial part of the image. In fact, 
		 * we read to cache, since it may be useful.
		 */
		VipsRect area;

		VIPS_DEBUG_MSG_GREEN( "thread %p skipping to line %d ...\n", 
			g_thread_self(),
			r->top );

		area.left = 0;
		area.top = sequential->y_pos;
		area.width = 1;
		area.height = r->top - sequential->y_pos;
		if( vips_region_prepare( ir, &area ) ) {
			VIPS_DEBUG_MSG( "thread %p error, unlocking #1 ...\n", 
				g_thread_self() ); 
			sequential->error = -1;
			g_cond_broadcast( sequential->ready );
			g_mutex_unlock( sequential->lock );
			return( -1 );
		}

		sequential->y_pos = VIPS_RECT_BOTTOM( &area );
	}

	/* This is a request for old or present pixels -- serve from cache.
	 * This may trigger further, sequential reads.
	 */
	VIPS_DEBUG_MSG_GREEN( "thread %p reading ...\n", g_thread_self() ); 
	if( vips_region_prepare( ir, r ) ||
		vips_region_region( or, ir, r, r->left, r->top ) ) {
		VIPS_DEBUG_MSG( "thread %p error, unlocking #2 ...\n", 
			g_thread_self() ); 
		sequential->error = -1;
		g_cond_broadcast( sequential->ready );
		g_mutex_unlock( sequential->lock );
		return( -1 );
	}

	if( VIPS_RECT_BOTTOM( r ) > sequential->y_pos ) {
		/* This request has moved the read point. Update it, and wake 
		 * up all stalled threads for a retry.
		 */
		sequential->y_pos = VIPS_RECT_BOTTOM( r );

		VIPS_DEBUG_MSG_GREEN( "thread %p updating y_pos to %d and "
			"waking stalled\n", 
			g_thread_self(),
			sequential->y_pos ); 

		g_cond_broadcast( sequential->ready );
	}

	VIPS_DEBUG_MSG_GREEN( "thread %p unlocking ...\n", g_thread_self() ); 

	g_mutex_unlock( sequential->lock );

	return( 0 );
}
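The stall above re-tests its predicate (r->top > sequential->y_pos) after every wakeup, which is the standard defence against spurious wakeups. The same shape written against the GLib >= 2.32 API only, as a hedged sketch; the predicate callback is illustrative and the caller is assumed to hold the mutex:

static gboolean
wait_with_deadline (GCond *cond, GMutex *lock,
	gboolean (*ready) (void *), void *data, gint64 timeout_us)
{
	gint64 end_time = g_get_monotonic_time () + timeout_us;

	/* Re-check the predicate after every wakeup; only a timeout from
	 * g_cond_wait_until() ends the loop early. */
	while (!ready (data))
		if (!g_cond_wait_until (cond, lock, end_time))
			return ready (data);

	return TRUE;
}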
Example #29
static GdkPixbufAnimation*
compose_animation_from_array (animation_param_entry **anim_array, gint array_len, gint pix_size)
{
     GdkPixbufSimpleAnim *result = NULL;
     gint frames = 1, duration = 0,  duration_p, frames_p;
     /* total frame count is the product of the per-animation frame counts; total duration is the maximum of the per-animation durations */
     gint i;

     GTimeVal start_time = {0, 0};
     for (i = 0; i<array_len; i++){
	  /* initialize iterators for each animation*/
	  animation_param_entry *p_entry = anim_array[i];
	  
	  p_entry->iter = gdk_pixbuf_animation_get_iter (p_entry->anim, &start_time);
	  
	  frames_p = duration_p = 0;
	  pgpug_pixbuf_animation_get_detail (p_entry->anim, &frames_p, &duration_p);

	  duration = duration < duration_p ? duration_p : duration;
	  frames *= frames_p;
     }	  



     float fps = duration > 0 ? frames * 1000 / duration : 1;
     gint frame_change_time = duration / frames; 
     DEBG_MSG ("Fps: %f. frame change time :%dmsec", fps,frame_change_time);

     result = gdk_pixbuf_simple_anim_new (pix_size, pix_size, fps);
 
     gint elapsed_time = 0, frames_counter = 1;
     while (elapsed_time <= duration && frames_counter++ <= frames){

	  animation_param_entry *p_entry = anim_array[0];
	  GdkPixbuf *base_pixbuf = gdk_pixbuf_copy (gdk_pixbuf_animation_iter_get_pixbuf (p_entry->iter));
	  g_time_val_add (&start_time, frame_change_time * 1000);
	  gdk_pixbuf_animation_iter_advance (p_entry->iter, &start_time);

	  for (i = 1; i < array_len; i++) {
	       
	       p_entry = anim_array[i];
	       GdkPixbuf *src_pixbuf = gdk_pixbuf_animation_iter_get_pixbuf (p_entry->iter);
	       gint x_offset, y_offset, src_size;
	       src_size = gdk_pixbuf_get_width (src_pixbuf);

	       
	       pgpug_pixbuf_calculate_composition_offset (pix_size, src_size, p_entry->pos, &x_offset, &y_offset);

	       
	       gdk_pixbuf_composite (src_pixbuf, 
				     base_pixbuf,
				     x_offset, y_offset, 
				     src_size, src_size, 
				     x_offset, y_offset,
				     1.0f, 1.0f,
				     GDK_INTERP_BILINEAR, 255);
	       gboolean res;
	       res = gdk_pixbuf_animation_iter_advance (p_entry->iter, &start_time);
	       
	  }

	  gdk_pixbuf_simple_anim_add_frame (result, base_pixbuf);
	  elapsed_time += frame_change_time;
     }

     gdk_pixbuf_simple_anim_set_loop (result, TRUE);

     for (i=0;i<array_len;i++){
	  g_object_unref (anim_array[i]->iter);
     }

     return GDK_PIXBUF_ANIMATION (result);
}
Example #30
static void
iris_thread_worker_exclusive (IrisThread  *thread,
                              IrisQueue   *queue,
                              gboolean     leader)
{
	GTimeVal        tv_now      = {0,0};
	GTimeVal        tv_req      = {0,0};
	IrisThreadWork *thread_work = NULL;
	gint            per_quanta = 0;      /* Completed items within the
	                                      * last quanta. */
	guint           queued      = 0;     /* Items left in the queue at the last check. */
	gboolean        has_resized = FALSE;

	iris_debug (IRIS_DEBUG_THREAD);

	g_get_current_time (&tv_now);
	g_get_current_time (&tv_req);
	queued = iris_queue_length (queue);

	/* Since our thread is in exclusive mode, we are responsible for
	 * asking the scheduler manager to add or remove threads based
	 * on the demand of our work queue.
	 *
	 * If the scheduler has maxed out the number of threads it is
	 * allowed, then we will not ask the scheduler to add more
	 * threads and rebalance.
	 */

get_next_item:

	if (G_LIKELY ((thread_work = iris_queue_pop (queue)) != NULL)) {
		if (!VERIFY_THREAD_WORK (thread_work))
			goto get_next_item;

		iris_thread_work_run (thread_work);
		iris_thread_work_free (thread_work);
		per_quanta++;
	}
	else {
#if 0
		g_warning ("Exclusive thread is done managing, received NULL");
#endif
		return;
	}

	if (G_UNLIKELY (!thread->scheduler->maxed && leader)) {
		g_get_current_time (&tv_now);

		if (G_UNLIKELY (timeout_elapsed (&tv_now, &tv_req))) {
			/* We check to see if we have a bunch more work to do
			 * or a potential edge case where we are processing at about
			 * the same speed as the pusher, but it creates enough
			 * contention that we don't speed up. This is because
			 * some schedulers will round-robin or steal.  And unless
			 * we look to add another thread even though we have nothing
			 * in the queue, we know there are more coming.
			 */
			queued = iris_queue_length (queue);
			if (queued == 0 && !has_resized) {
				queued = per_quanta * 2;
				has_resized = TRUE;
			}

			if (per_quanta < queued) {
				/* make sure we are not maxed before asking */
				if (!g_atomic_int_get (&thread->scheduler->maxed))
					iris_scheduler_manager_request (thread->scheduler,
									per_quanta,
									queued);
			}

			per_quanta = 0;
			tv_req = tv_now;
			g_time_val_add (&tv_req, QUANTUM_USECS);
		}
	}

	goto get_next_item;
}