Example #1
bool
AsxParserInternal::handle_element_close (const char* name)
{
	char *expected = (char *) g_queue_pop_head (element_stack);

	if (expected == NULL || g_ascii_strcasecmp (expected, name)) {
		/* unbalanced or mismatched element: free the popped name before bailing out */
		g_free (expected);
		raise_error (ASXPARSER_ERROR_UNBALANCED_ELEMENTS, "Invalid closing element found.");
		return false;
	}

	g_free (expected);

	if (end_element_handler)
		end_element_handler (parser, current_element);
	return true;
}
Example #2
static EvSchedulerJob *
ev_job_queue_get_next_unlocked (void)
{
	gint i;
	EvSchedulerJob *job = NULL;
	
	for (i = EV_JOB_PRIORITY_URGENT; i < EV_JOB_N_PRIORITIES; i++) {
		job = (EvSchedulerJob *) g_queue_pop_head (job_queue[i]);
		if (job)
			break;
	}

	ev_debug_message (DEBUG_JOBS, "%s", job ? EV_GET_TYPE_NAME (job->job) : "No jobs in queue");

	return job;
}
Example #3
void
git_pane_send_raw_output_to_editor (AnjutaCommand *command, 
                                    IAnjutaEditor *editor)
{
	GQueue *output;
	gchar *line;

	output = git_raw_output_command_get_output (GIT_RAW_OUTPUT_COMMAND (command));

	while (g_queue_peek_head (output))
	{
		line = g_queue_pop_head (output);
		ianjuta_editor_append (editor, line, strlen (line), NULL);
		g_free (line);
	}
}
Example #4
static void
gst_multi_file_sink_ensure_max_files (GstMultiFileSink * multifilesink)
{
  guint max_files = multifilesink->max_files;

  if (max_files == 0)
    return;

  while (g_queue_get_length (&multifilesink->old_files) >= max_files) {
    gchar *filename;

    filename = g_queue_pop_head (&multifilesink->old_files);
    g_remove (filename);
    g_free (filename);
  }
}
Example #5
/* ...purge render queues */
static inline void sview_purge_buffers(app_data_t *app)
{
    int     i;

    pthread_mutex_lock(&app->lock);

    for (i = 0; i < CAMERAS_NUMBER; i++)
    {
        while (!g_queue_is_empty(&app->render[i]))
        {
            gst_buffer_unref(g_queue_pop_head(&app->render[i]));
        }
    }

    pthread_mutex_unlock(&app->lock);
}
Example #6
static void
gst_live_adder_finalize (GObject * object)
{
    GstLiveAdder *adder = GST_LIVE_ADDER (object);

    g_cond_free (adder->not_empty_cond);

    g_queue_foreach (adder->buffers, (GFunc) gst_mini_object_unref, NULL);
    while (g_queue_pop_head (adder->buffers)) {
    }
    g_queue_free (adder->buffers);

    g_list_free (adder->sinkpads);

    G_OBJECT_CLASS (parent_class)->finalize (object);
}
Example #7
static void
gkm_wrap_prompt_finalize (GObject *obj)
{
	GkmWrapPrompt *self = GKM_WRAP_PROMPT (obj);

	if (self->destroy_data && self->prompt_data)
		(self->destroy_data) (self->prompt_data);
	self->destroy_data = NULL;
	self->prompt_data = NULL;

	while (!g_queue_is_empty(&self->pool))
		g_free (g_queue_pop_head (&self->pool));

	G_OBJECT_CLASS (gkm_wrap_prompt_parent_class)->finalize (obj);
}
Example #8
static void
gst_kate_util_decoder_base_drain_event_queue (GstKateDecoderBase * decoder)
{
  decoder->delay_events = FALSE;

  if (decoder->event_queue->length == 0)
    return;

  GST_DEBUG_OBJECT (decoder, "We can now drain all events!");
  while (decoder->event_queue->length) {
    GstKateDecoderBaseQueuedEvent *item = (GstKateDecoderBaseQueuedEvent *)
        g_queue_pop_head (decoder->event_queue);
    (*item->handler) (item->pad, item->event);
    g_slice_free (GstKateDecoderBaseQueuedEvent, item);
  }
}
Example #9
void calls_status_tcp(struct callmaster *m, struct control_stream *s) {
	GQueue q = G_QUEUE_INIT;
	struct call *c;

	callmaster_get_all_calls(m, &q);

	control_stream_printf(s, "proxy %u "UINT64F"/%i/%i\n",
		g_queue_get_length(&q),
		atomic64_get(&m->stats.bytes), 0, 0);

	while (q.head) {
		c = g_queue_pop_head(&q);
		call_status_iterator(c, s);
		obj_put(c);
	}
}
Example #10
/**
 * gst_app_sink_pull_buffer:
 * @appsink: a #GstAppSink
 *
 * This function blocks until a buffer or EOS becomes available or the appsink
 * element is set to the READY/NULL state.
 *
 * This function will only return buffers when the appsink is in the PLAYING
 * state. All rendered buffers will be put in a queue so that the application
 * can pull buffers at its own rate. Note that when the application does not
 * pull buffers fast enough, the queued buffers could consume a lot of memory,
 * especially when dealing with raw video frames.
 *
 * If an EOS event was received before any buffers, this function returns
 * %NULL. Use gst_app_sink_is_eos () to check for the EOS condition.
 *
 * Returns: a #GstBuffer or NULL when the appsink is stopped or EOS.
 */
GstBuffer *
gst_app_sink_pull_buffer (GstAppSink * appsink)
{
  GstBuffer *buf = NULL;

  //printf("pull_buffer\n");

  g_return_val_if_fail (appsink != NULL, NULL);
  g_return_val_if_fail (GST_IS_APP_SINK (appsink), NULL);

  g_mutex_lock (appsink->mutex);

  while (TRUE) {
    GST_DEBUG_OBJECT (appsink, "trying to grab a buffer");
    if (!appsink->started)
      goto not_started;

    if (!g_queue_is_empty (appsink->queue))
      break;

    if (appsink->is_eos)
      goto eos;

    /* nothing to return, wait */
    GST_DEBUG_OBJECT (appsink, "waiting for a buffer");
    g_cond_wait (appsink->cond, appsink->mutex);
  }
  buf = (GstBuffer*)g_queue_pop_head (appsink->queue);
  GST_DEBUG_OBJECT (appsink, "we have a buffer %p", buf);
  g_mutex_unlock (appsink->mutex);

  return buf;

  /* special conditions */
eos:
  {
    GST_DEBUG_OBJECT (appsink, "we are EOS, return NULL");
    g_mutex_unlock (appsink->mutex);
    return NULL;
  }
not_started:
  {
    GST_DEBUG_OBJECT (appsink, "we are stopped, return NULL");
    g_mutex_unlock (appsink->mutex);
    return NULL;
  }
}
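The doc comment above spells out the pull contract: the call blocks until a buffer or EOS is available and returns NULL once the element is stopped or drained. As a minimal consumer sketch built on that contract, assuming a GStreamer 0.10 pipeline whose appsink element was created and started elsewhere (appsink_element is a placeholder name):

GstAppSink *sink = GST_APP_SINK (appsink_element);   /* placeholder element */
GstBuffer *buf;

while ((buf = gst_app_sink_pull_buffer (sink)) != NULL) {
  /* consume the data, then drop the reference returned to us */
  g_print ("got buffer of %u bytes\n", GST_BUFFER_SIZE (buf));
  gst_buffer_unref (buf);
}

/* NULL means either EOS or that the appsink left the PLAYING state */
if (gst_app_sink_is_eos (sink))
  g_print ("end of stream\n");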
Example #11
/*
 * read one line from the file descriptor
 * timeout: msec unit, -1 for infinite
 * if CR comes then following LF is expected
 * returned string in line is always null terminated, maxlen-1 is maximum string length
 */
static int
read_line(struct rtspcl_data *rtspcld, char *line, int maxlen,
	  int timeout)
{
	g_mutex_lock(rtspcld->mutex);

	GTimeVal end_time;
	if (timeout >= 0) {
		g_get_current_time(&end_time);

		end_time.tv_sec += timeout / 1000;
		timeout %= 1000;
		/* add the sub-second remainder to the current microseconds */
		end_time.tv_usec += timeout * 1000;
		if (end_time.tv_usec >= 1000000) {
			end_time.tv_usec -= 1000000;
			++end_time.tv_sec;
		}
	}

	while (true) {
		if (!g_queue_is_empty(rtspcld->received_lines)) {
			/* success, copy to buffer */

			char *p = g_queue_pop_head(rtspcld->received_lines);
			g_mutex_unlock(rtspcld->mutex);

			g_strlcpy(line, p, maxlen);
			g_free(p);

			return strlen(line);
		}

		if (rtspcld->tcp_socket == NULL) {
			/* error */
			g_mutex_unlock(rtspcld->mutex);
			return -1;
		}

		if (timeout < 0) {
			g_cond_wait(rtspcld->cond, rtspcld->mutex);
		} else if (!g_cond_timed_wait(rtspcld->cond, rtspcld->mutex,
					      &end_time)) {
			g_mutex_unlock(rtspcld->mutex);
			return 0;
		}
	}
}
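For context, a hypothetical producer counterpart (not part of the original source) would push each completed line under the same mutex and signal the condition variable that read_line() blocks on:

static void
deliver_line(struct rtspcl_data *rtspcld, const char *line)
{
	/* hand one finished line to read_line() and wake any waiter */
	g_mutex_lock(rtspcld->mutex);
	g_queue_push_tail(rtspcld->received_lines, g_strdup(line));
	g_cond_signal(rtspcld->cond);
	g_mutex_unlock(rtspcld->mutex);
}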
Example #12
/**
 * eva_thread_pool_push:
 * @pool: the pool to add the new task to.
 * @run: function to invoke in the other thread.
 * @handle_result: function to invoke in the main-loop's thread.
 * It is invoked with both @run_data and the return value from @run.
 * @run_data: data to pass to @run, @handle_result and @destroy.
 * @destroy: function to be invoked once everything else is done,
 * with both @run_data and the return value from @run.
 *
 * Add a new task to the thread-pool.
 *
 * The @run function should be the slow function that must
 * be run in a background thread.
 *
 * The @handle_result function will be called in the current
 * thread (which must be the same as the thread of the main-loop
 * that was used to construct this pool) with the return
 * value of @run.
 *
 * The @destroy function will be invoked in the main thread,
 * after @run and @handle_result are done.
 */
void
eva_thread_pool_push   (EvaThreadPool           *pool,
			EvaThreadPoolRunFunc     run,
			EvaThreadPoolResultFunc  handle_result,
			gpointer                 run_data,
			EvaThreadPoolDestroyFunc destroy)
{
  TaskInfo *info = g_new (TaskInfo, 1);
  ThreadInfo *thread_info;
  g_return_if_fail (pool->destroy_pending == FALSE);
  info->run = run;
  info->handle_result = handle_result;
  info->run_data = run_data;
  info->destroy = destroy;

  g_mutex_lock (pool->lock);
  thread_info = g_queue_pop_head (pool->idle_threads);

  if (thread_info != NULL)
    {
      thread_info->running_task = info;
      g_cond_signal (thread_info->cond);
    }
  else if (pool->max_threads == 0 || pool->num_threads < pool->max_threads)
    {
      GError *error = NULL;
      thread_info = g_new (ThreadInfo, 1);
      thread_info->pool = pool;
      thread_info->cond = g_cond_new ();
      thread_info->running_task = info;
      thread_info->cancelled = FALSE;
      thread_info->thread = g_thread_create (the_thread_func, thread_info, TRUE, &error);
      if (thread_info->thread == NULL)
	{
	  /* destroy thread_info and warn; the task will fall back to the
	     unstarted queue below. */
	  g_message ("error creating thread: %s", error->message);
	  g_error_free (error);
	  g_cond_free (thread_info->cond);
	  g_free (thread_info);
	  thread_info = NULL;
	}
      else
	pool->num_threads++;
    }
  if (thread_info == NULL)
    g_queue_push_tail (pool->unstarted_tasks, info);
  g_mutex_unlock (pool->lock);
}
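eva's pool is not a stock GLib API, so purely as a rough analogue of the documented flow (run in a worker thread, then hand the result back to the main-loop thread), here is a self-contained sketch using GThreadPool and g_idle_add(); every name in it is hypothetical and it is not eva_thread_pool_push itself:

#include <glib.h>

typedef struct {
  gchar *input;   /* task data handed to the worker */
  gchar *result;  /* filled in by the worker thread */
} Task;

static GMainLoop *loop;

/* Runs in the main loop's thread, playing the role of @handle_result. */
static gboolean
handle_result_in_main_loop (gpointer data)
{
  Task *task = data;

  g_print ("result: %s\n", task->result);
  g_free (task->input);
  g_free (task->result);
  g_free (task);
  g_main_loop_quit (loop);
  return G_SOURCE_REMOVE;
}

/* Runs in a pool thread, playing the role of @run. */
static void
worker (gpointer data, gpointer user_data)
{
  Task *task = data;

  task->result = g_ascii_strup (task->input, -1);  /* stand-in for slow work */
  g_idle_add (handle_result_in_main_loop, task);   /* back to the main loop */
}

int
main (void)
{
  GThreadPool *pool = g_thread_pool_new (worker, NULL, 4, FALSE, NULL);
  Task *task = g_new0 (Task, 1);

  loop = g_main_loop_new (NULL, FALSE);
  task->input = g_strdup ("hello");

  g_thread_pool_push (pool, task, NULL);
  g_main_loop_run (loop);

  g_thread_pool_free (pool, FALSE, TRUE);
  g_main_loop_unref (loop);
  return 0;
}

Unlike this generic sketch, eva_thread_pool_push above hands the task straight to a parked thread from pool->idle_threads when one is available, and only queues it in pool->unstarted_tasks once the pool is saturated.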
Example #13
static GString *
file_builder_serialise (FileBuilder          *fb,
                        struct gvdb_pointer   root)
{
  struct gvdb_header header = { { 0, }, };
  GString *result;

  if (fb->byteswap)
    {
      header.signature[0] = GVDB_SWAPPED_SIGNATURE0;
      header.signature[1] = GVDB_SWAPPED_SIGNATURE1;
    }
  else
    {
      header.signature[0] = GVDB_SIGNATURE0;
      header.signature[1] = GVDB_SIGNATURE1;
    }

  result = g_string_new (NULL);

  header.root = root;
  g_string_append_len (result, (gpointer) &header, sizeof header);

  while (!g_queue_is_empty (fb->chunks))
    {
      FileChunk *chunk = g_queue_pop_head (fb->chunks);

      if (result->len != chunk->offset)
        {
          gchar zero[8] = { 0, };

          g_assert (chunk->offset > result->len);
          g_assert (chunk->offset - result->len < 8);

          g_string_append_len (result, zero, chunk->offset - result->len);
          g_assert (result->len == chunk->offset);
        }

      g_string_append_len (result, chunk->data, chunk->size);
      g_free (chunk->data);
    }

  g_queue_free (fb->chunks);
  g_slice_free (FileBuilder, fb);

  return result;
}
Example #14
static GstMiniObject *
gst_app_sink_pull_object (GstAppSink * appsink)
{
  GstMiniObject *obj = NULL;

  g_return_val_if_fail (appsink != NULL, NULL);
  g_return_val_if_fail (GST_IS_APP_SINK (appsink), NULL);

  g_mutex_lock (appsink->priv->mutex);

  while (TRUE) {
    GST_DEBUG_OBJECT (appsink, "trying to grab a buffer/list");
    if (!appsink->priv->started)
      goto not_started;

    if (!g_queue_is_empty (appsink->priv->queue))
      break;

    if (appsink->priv->is_eos)
      goto eos;

    /* nothing to return, wait */
    GST_DEBUG_OBJECT (appsink, "waiting for a buffer/list");
    g_cond_wait (appsink->priv->cond, appsink->priv->mutex);
  }
  obj = g_queue_pop_head (appsink->priv->queue);
  GST_DEBUG_OBJECT (appsink, "we have a buffer/list %p", obj);
  g_cond_signal (appsink->priv->cond);
  g_mutex_unlock (appsink->priv->mutex);

  return obj;

  /* special conditions */
eos:
  {
    GST_DEBUG_OBJECT (appsink, "we are EOS, return NULL");
    g_mutex_unlock (appsink->priv->mutex);
    return NULL;
  }
not_started:
  {
    GST_DEBUG_OBJECT (appsink, "we are stopped, return NULL");
    g_mutex_unlock (appsink->priv->mutex);
    return NULL;
  }
}
Example #15
static gboolean
message_handler (gpointer data)
{
    //g_print ("message_handler->enter\n");
    // Make sure we have something in the queue.
    if (g_queue_is_empty (message_queue)) {
        queue_active = FALSE;
    } else {
        MessageData *message_data = g_queue_pop_head (message_queue);
        soup_session_queue_message (message_data->session,
                                    message_data->msg,
                                    message_complete,
                                    message_data);
    }
    //g_print ("message_handler->exit\n");
    return FALSE;
}
Example #16
void
msn_cmdproc_destroy(MsnCmdProc *cmdproc)
{
	MsnTransaction *trans;

	while ((trans = g_queue_pop_head(cmdproc->txqueue)) != NULL)
		msn_transaction_destroy(trans);

	g_queue_free(cmdproc->txqueue);

	msn_history_destroy(cmdproc->history);

	if (cmdproc->last_cmd != NULL)
		msn_command_destroy(cmdproc->last_cmd);

	g_free(cmdproc);
}
Example #17
static int
check_end_condition (CcnetProcessor *processor)
{
    USE_PRIV;

    char *dir_id;
    while (priv->checking_dirs < MAX_CHECKING_DIRS) {
        dir_id = g_queue_pop_head (priv->dir_queue);
        if (!dir_id)
            break;

#ifdef DEBUG
        seaf_debug ("[recvfs] Inspect dir %s.\n", dir_id);
#endif

        if (seaf_obj_store_async_read (seaf->fs_mgr->obj_store,
                                       priv->reader_id,
                                       dir_id) < 0) {
            seaf_warning ("[recvfs] Failed to start async read of %s.\n", dir_id);
            ccnet_processor_send_response (processor, SC_BAD_OBJECT, SS_BAD_OBJECT,
                                           NULL, 0);
            ccnet_processor_done (processor, FALSE);
            return FALSE;
        }
        g_free (dir_id);

        ++(priv->inspect_objects);
        ++(priv->checking_dirs);
    }

    if (priv->checking_dirs > 100)
        seaf_debug ("Number of checking dirs: %d.\n", priv->checking_dirs);
    if (priv->inspect_objects > 1000)
        seaf_debug ("Number of inspect objects: %d.\n", priv->inspect_objects);

    /* Flush periodically. */
    request_object_batch_flush (processor, priv);

    if (priv->pending_objects == 0 && priv->inspect_objects == 0) {
        seaf_debug ("Recv fs end.\n");
        ccnet_processor_send_response (processor, SC_END, SS_END, NULL, 0);
        ccnet_processor_done (processor, TRUE);
        return FALSE;
    } else
        return TRUE;
}
Example #18
/**
 * gs_shell_back_button_cb:
 **/
static void
gs_shell_back_button_cb (GtkWidget *widget, GsShell *shell)
{
	GsShellPrivate *priv = gs_shell_get_instance_private (shell);
	BackEntry *entry;

	g_return_if_fail (!g_queue_is_empty (priv->back_entry_stack));

	entry = g_queue_pop_head (priv->back_entry_stack);

	gs_shell_change_mode (shell, entry->mode, entry->app, entry->category, FALSE);

	if (entry->focus != NULL)
		gtk_widget_grab_focus (entry->focus);

	free_back_entry (entry);
}
Example #19
void
on_command_info_arrived (AnjutaCommand *command, Subversion *plugin)
{
	GQueue *info;
	gchar *message;
	
	info = svn_command_get_info_queue (SVN_COMMAND (command));
	
	while (g_queue_peek_head (info))
	{
		message = g_queue_pop_head (info);
		ianjuta_message_view_append (plugin->mesg_view, 
								     IANJUTA_MESSAGE_VIEW_TYPE_INFO,
									 message, "", NULL);
		g_free (message);
	}
}
Example #20
static void
on_diff_command_data_arrived (AnjutaCommand *command, 
							  IAnjutaVcsDiffCallback callback)
{
	GQueue *output;
	gchar *line;
	
	output = svn_diff_command_get_output (SVN_DIFF_COMMAND (command));
	
	while (g_queue_peek_head (output))
	{
		line = g_queue_pop_head (output);
		callback (g_object_get_data (G_OBJECT (command), "file"), line,
				  g_object_get_data (G_OBJECT (command), "user-data"));
		g_free (line);
	}
}
Example #21
static void dir_tree_file_read_on_last_chunk_cb (S3HttpClient *http, struct evbuffer *input_buf, gpointer ctx)
{
    gchar *buf = NULL;
    size_t buf_len;
    DirTreeFileOpData *op_data = (DirTreeFileOpData *) ctx;
    DirTreeFileRange *range;

    buf_len = evbuffer_get_length (input_buf);
    buf = (gchar *) evbuffer_pullup (input_buf, buf_len);
    
    /*
    range = g_queue_pop_head (op_data->q_ranges_requested);
    if (range) {
        op_data->c_size = range->size;
        op_data->c_off = range->off;
        op_data->c_req = range->c_req;
    }
    */

    op_data->total_read += buf_len;
    LOG_debug (DIR_TREE_LOG, "[%p %p] TOTAL read: %zu (req: %zu), orig size: %zu, TOTAL: %"OFF_FMT", Qsize: %zu", 
        op_data->c_req, http,
        buf_len, op_data->c_size, op_data->en->size, op_data->total_read, g_queue_get_length (op_data->q_ranges_requested));
    
    if (op_data->file_read_cb)
        op_data->file_read_cb (op_data->c_req, TRUE, buf, buf_len);

    evbuffer_drain (input_buf, buf_len);
    
    // if there are more pending chunk requests 
    if (g_queue_get_length (op_data->q_ranges_requested) > 0) {
        range = g_queue_pop_head (op_data->q_ranges_requested);
        LOG_debug (DIR_TREE_LOG, "[%p] more data: %zd", range->c_req, range->size);
        op_data->c_size = range->size;
        op_data->c_off = range->off;
        op_data->c_req = range->c_req;
        g_free (range);

        op_data->op_in_progress = TRUE;
        // perform the next chunk request
        dir_tree_file_read_prepare_request (op_data, http, op_data->c_off, op_data->c_size);
    } else {
        LOG_debug (DIR_TREE_LOG, "Done downloading !!");
        op_data->op_in_progress = FALSE;
    }
}
Example #22
static void
json_builder_free_all_state (JsonBuilder *builder)
{
  JsonBuilderState *state;

  while (!g_queue_is_empty (builder->priv->stack))
    {
      state = g_queue_pop_head (builder->priv->stack);
      json_builder_state_free (state);
    }

  if (builder->priv->root)
    {
      json_node_free (builder->priv->root);
      builder->priv->root = NULL;
    }
}
Example #23
/**
 * Clear the ringbuffer's data
 */
void
xmms_ringbuf_clear (xmms_ringbuf_t *ringbuf)
{
	g_return_if_fail (ringbuf);

	ringbuf->rd_index = 0;
	ringbuf->wr_index = 0;

	while (!g_queue_is_empty (ringbuf->hotspots)) {
		xmms_ringbuf_hotspot_t *hs;
		hs = g_queue_pop_head (ringbuf->hotspots);
		if (hs->destroy)
			hs->destroy (hs->arg);
		g_free (hs);
	}
	g_cond_signal (ringbuf->free_cond);
}
Example #24
static void lcm_memq_destroy(lcm_memq_t *self)
{
    dbg(DBG_LCM, "destroying LCM memq provider context\n");
    if (self->notify_pipe[0] >= 0)
        lcm_internal_pipe_close(self->notify_pipe[0]);
    if (self->notify_pipe[1] >= 0)
        lcm_internal_pipe_close(self->notify_pipe[1]);

    while (!g_queue_is_empty(self->queue)) {
        memq_msg_t *msg = (memq_msg_t *) g_queue_pop_head(self->queue);
        memq_msg_destroy(msg);
    }
    g_queue_free(self->queue);
    g_mutex_free(self->mutex);
    memset(self, 0, sizeof(lcm_memq_t));
    free(self);
}
Example #25
static void
cockpit_stream_dispose (GObject *object)
{
  CockpitStream *self = COCKPIT_STREAM (object);

  if (!self->priv->closed)
    close_immediately (self, "terminated");

  while (self->priv->out_queue->head)
    g_bytes_unref (g_queue_pop_head (self->priv->out_queue));

  if (self->priv->options)
    cockpit_stream_options_unref (self->priv->options);
  self->priv->options = NULL;

  G_OBJECT_CLASS (cockpit_stream_parent_class)->dispose (object);
}
Example #26
/**
 * grl_net_wc_flush_delayed_requests:
 * @self: a #GrlNetWc instance
 *
 * This method flushes all pending requests in the queue.
 */
void
grl_net_wc_flush_delayed_requests (GrlNetWc *self)
{
  g_return_if_fail (GRL_IS_NET_WC (self));

  GrlNetWcPrivate *priv = self->priv;
  struct request_clos *c;

  while ((c = g_queue_pop_head (priv->pending))) {
    g_source_remove (c->source_id);
    g_object_unref (c->cancellable);
    g_free (c->url);
    g_free (c);
  }

  g_get_current_time (&priv->last_request);
}
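A minimal usage sketch, assuming a GrlNetWc created with grl_net_wc_new() and some asynchronous requests queued against it elsewhere:

GrlNetWc *wc = grl_net_wc_new ();

/* ... asynchronous requests get queued against wc elsewhere ... */

/* drop anything still waiting before releasing the object */
grl_net_wc_flush_delayed_requests (wc);
g_object_unref (wc);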
Example #27
/**
 * cockpit_channel_ready:
 * @self: a channel
 *
 * Called by channel implementations to signal when they're
 * ready. Any messages received before the channel was ready
 * will be delivered to the channel's recv() vfunc in the order
 * that they were received.
 *
 * If this is called immediately after or during construction then
 * the closing will happen after the main loop so that handlers
 * can connect appropriately.
 */
void
cockpit_channel_ready (CockpitChannel *self)
{
  CockpitChannelClass *klass;
  GBytes *decoded;
  GBytes *payload;
  GQueue *queue;

  klass = COCKPIT_CHANNEL_GET_CLASS (self);
  g_assert (klass->recv != NULL);
  g_assert (klass->close != NULL);

  g_object_ref (self);
  while (self->priv->received)
    {
      queue = self->priv->received;
      self->priv->received = NULL;
      for (;;)
        {
          payload = g_queue_pop_head (queue);
          if (payload == NULL)
            break;
          if (self->priv->base64_encoding)
            {
              decoded = base64_decode (payload);
              g_bytes_unref (payload);
              payload = decoded;
            }
          (klass->recv) (self, payload);
          g_bytes_unref (payload);
        }
      g_queue_free (queue);
    }

  cockpit_channel_control (self, "ready", NULL);
  self->priv->ready = TRUE;

  /* No more data coming? */
  if (self->priv->received_done)
    {
      if (klass->control)
        (klass->control) (self, "done", NULL);
    }

  g_object_unref (self);
}
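The pattern the doc comment describes (queue messages that arrive before the channel is ready, then drain them in arrival order) can be illustrated with plain GLib outside of Cockpit's API; everything below is a generic, hypothetical sketch rather than CockpitChannel itself:

#include <glib.h>

typedef struct {
  gboolean ready;
  GQueue *pending;                      /* holds GBytes*, FIFO order */
  void (*deliver) (GBytes *payload);    /* hypothetical consumer hook */
} EarlyBuffer;

/* Queue payloads that arrive too early; deliver directly once ready. */
static void
early_buffer_receive (EarlyBuffer *buf, GBytes *payload)
{
  if (!buf->ready)
    g_queue_push_tail (buf->pending, g_bytes_ref (payload));
  else
    buf->deliver (payload);
}

/* Flip to ready and replay everything that arrived early, oldest first. */
static void
early_buffer_set_ready (EarlyBuffer *buf)
{
  GBytes *payload;

  buf->ready = TRUE;
  while ((payload = g_queue_pop_head (buf->pending)) != NULL)
    {
      buf->deliver (payload);
      g_bytes_unref (payload);
    }
}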
Example #28
void
ipc_endpoint_decref(ipc_endpoint_t *ipc)
{
    if (!g_atomic_int_dec_and_test(&ipc->refcount))
        return;
    if (ipc->status == IPC_ENDPOINT_CONNECTED)
        ipc_endpoint_disconnect(ipc);
    if (ipc->queue) {
        while (!g_queue_is_empty(ipc->queue)) {
            queued_ipc_t *msg = g_queue_pop_head(ipc->queue);
            g_free(msg);
        }
        g_queue_free(ipc->queue);
    }
    ipc->status = IPC_ENDPOINT_FREED;
    g_slice_free(ipc_endpoint_t, ipc);
}
Example #29
/*
 * pad buffer probe that compares the buffer with the top one
 * in the GQueue passed as the user data
 */
static GstPadProbeReturn
srcpad_dequeue_and_compare_buffer (GstPad * pad, GstPadProbeInfo * info,
    gpointer data)
{
  GQueue *queue = (GQueue *) data;
  GstBuffer *buf = GST_PAD_PROBE_INFO_BUFFER (info);
  GstBuffer *queue_buf;

  queue_buf = (GstBuffer *) g_queue_pop_head (queue);
  fail_if (queue_buf == NULL);

  fail_unless (test_buffer_equals (buf, queue_buf));

  gst_buffer_unref (queue_buf);

  return GST_PAD_PROBE_OK;
}
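A hypothetical test-setup fragment for the probe above: push a reference to each buffer you expect into the queue, then install the callback on the element's source pad (srcpad and buffer_to_expect are placeholder names):

GQueue *expected = g_queue_new ();

g_queue_push_tail (expected, gst_buffer_ref (buffer_to_expect));
gst_pad_add_probe (srcpad, GST_PAD_PROBE_TYPE_BUFFER,
    srcpad_dequeue_and_compare_buffer, expected, NULL);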
Example #30
static int
check_object (CcnetProcessor *processor)
{
    USE_PRIV;
    char *obj_id;
    SeafDir *dir;
    static int i = 0;

    request_object_batch_begin(priv);

    /* process inspect queue */
    /* Note: All files in a directory must be checked in an iteration,
     * so we may send out more items than REQUEST_THRESHOLD */
    while (g_hash_table_size (priv->fs_objects) < MAX_NUM_UNREVD) {
        obj_id = (char *) g_queue_pop_head (priv->inspect_queue);
        if (obj_id == NULL)
            break;
        if (!seaf_fs_manager_object_exists(seaf->fs_mgr, obj_id)) {
            request_object_batch (processor, priv, obj_id);
        } else {
            dir = seaf_fs_manager_get_seafdir (seaf->fs_mgr, obj_id);
            if (!dir) {
                /* corrupt dir object */
                request_object_batch (processor, priv, obj_id);
            } else {
                check_seafdir(processor, dir);
                seaf_dir_free (dir);
            }
        }
        g_free (obj_id);        /* free the memory */
    }

    request_object_batch_flush (processor, priv);

    /* check end condition */
    if (i%10 == 0)
        seaf_debug ("[getfs] pending objects num: %d\n", priv->pending_objects);
    ++i;

    if (priv->pending_objects == 0 && g_queue_is_empty(priv->inspect_queue)) {
        ccnet_processor_send_update (processor, SC_END, SS_END, NULL, 0);
        ccnet_processor_done (processor, TRUE);
        return FALSE;
    } else
        return TRUE;
}