Example #1
static gboolean
gst_adder_sink_event (GstCollectPads2 * pads, GstCollectData2 * pad,
    GstEvent * event, gpointer user_data)
{
  GstAdder *adder = GST_ADDER (user_data);
  gboolean res = FALSE;

  GST_DEBUG_OBJECT (pad->pad, "Got %s event on sink pad from %s",
      GST_EVENT_TYPE_NAME (event), GST_OBJECT_NAME (GST_EVENT_SRC (event)));

  switch (GST_EVENT_TYPE (event)) {
    case GST_EVENT_FLUSH_START:
      /* drop flush start events, as we forwarded one already when handling the
       * flushing seek on the sink pad */
      gst_event_unref (event);
      res = TRUE;
      break;
    case GST_EVENT_FLUSH_STOP:
      /* we received a flush-stop. We will only forward it when
       * flush_stop_pending is set, and we will unset it then.
       */
      if (g_atomic_int_compare_and_exchange (&adder->flush_stop_pending,
              TRUE, FALSE)) {
        g_atomic_int_set (&adder->new_segment_pending, TRUE);
        GST_DEBUG_OBJECT (pad->pad, "forwarding flush stop");
      } else {
        gst_event_unref (event);
        res = TRUE;
        GST_DEBUG_OBJECT (pad->pad, "eating flush stop");
      }
      /* Clear pending tags */
      if (adder->pending_events) {
        g_list_foreach (adder->pending_events, (GFunc) gst_event_unref, NULL);
        g_list_free (adder->pending_events);
        adder->pending_events = NULL;
      }
      break;
    case GST_EVENT_TAG:
      /* collect tags here so we can push them out when we collect data */
      adder->pending_events = g_list_append (adder->pending_events, event);
      res = TRUE;
      break;
    case GST_EVENT_NEWSEGMENT:
      if (g_atomic_int_compare_and_exchange (&adder->wait_for_new_segment,
              TRUE, FALSE)) {
        /* make sure we push a new segment, to inform about new basetime
         * see FIXME in gst_adder_collected() */
        g_atomic_int_set (&adder->new_segment_pending, TRUE);
      }
      gst_event_unref (event);
      res = TRUE;
      break;
    default:
      break;
  }
  return res;
}
Example #2
int 
main (int   argc,
      char *argv[])
{
  gint i;
  gint atomic = -5;
  gpointer atomic_pointer = NULL;
  gpointer biggest_pointer = (gpointer)((gsize)atomic_pointer - 1);

  for (i = 0; i < 15; i++)
    g_atomic_int_inc (&atomic);
  g_assert (atomic == 10);
  for (i = 0; i < 9; i++)
    g_assert (!g_atomic_int_dec_and_test (&atomic));
  g_assert (g_atomic_int_dec_and_test (&atomic));
  g_assert (atomic == 0);

  g_assert (g_atomic_int_add (&atomic, 5) == 0);
  g_assert (atomic == 5);

  g_assert (g_atomic_int_add (&atomic, -10) == 5);
  g_assert (atomic == -5);

  g_atomic_int_add (&atomic, 20);
  g_assert (atomic == 15);

  g_atomic_int_add (&atomic, -35);
  g_assert (atomic == -20);

  g_assert (atomic == g_atomic_int_get (&atomic));

  g_assert (g_atomic_int_compare_and_exchange (&atomic, -20, 20));
  g_assert (atomic == 20);
  
  g_assert (!g_atomic_int_compare_and_exchange (&atomic, 42, 12));
  g_assert (atomic == 20);
  
  g_assert (g_atomic_int_compare_and_exchange (&atomic, 20, G_MAXINT));
  g_assert (atomic == G_MAXINT);

  g_assert (g_atomic_int_compare_and_exchange (&atomic, G_MAXINT, G_MININT));
  g_assert (atomic == G_MININT);

  g_assert (g_atomic_pointer_compare_and_exchange (&atomic_pointer, 
						   NULL, biggest_pointer));
  g_assert (atomic_pointer == biggest_pointer);

  g_assert (atomic_pointer == g_atomic_pointer_get (&atomic_pointer));

  g_assert (g_atomic_pointer_compare_and_exchange (&atomic_pointer, 
						   biggest_pointer, NULL));
  g_assert (atomic_pointer == NULL);
  
  return 0;
}
Example #3
/**
 * hg_snapshot_restore:
 * @snapshot:
 * @func:
 * @data:
 *
 * FIXME
 *
 * Returns:
 */
hg_bool_t
hg_snapshot_restore(hg_snapshot_t          *snapshot,
		    hg_restore_prep_func_t  func,
		    hg_pointer_t            data)
{
	hg_bool_t retval = FALSE;
	hg_mem_t *mem;
	static volatile hg_int_t is_running = 0;
	hg_int_t old_val;

	hg_return_val_if_fail (snapshot != NULL, FALSE, HG_e_typecheck);
	hg_return_val_if_fail (snapshot->snapshot != NULL, FALSE, HG_e_VMerror);
	hg_return_val_if_fail (snapshot->o.mem != NULL, FALSE, HG_e_VMerror);

	mem = snapshot->o.mem;

	hg_return_val_if_fail (mem->allocator != NULL, FALSE, HG_e_VMerror);
	hg_return_val_if_fail (mem->allocator->restore_snapshot != NULL, FALSE, HG_e_VMerror);
	hg_return_val_if_fail (mem->data != NULL, FALSE, HG_e_VMerror);
	hg_return_val_if_fail (mem->reference_table == NULL, FALSE, HG_e_VMerror);

  retry:
	old_val = g_atomic_int_get(&is_running);
	if (old_val > 0) {
		hg_critical("Unable to restore the snapshot during restoring");
		hg_error_return (HG_STATUS_FAILED, HG_e_VMerror);
	}
	if (!g_atomic_int_compare_and_exchange(&is_running, old_val, old_val + 1))
		goto retry;

	hg_mem_spool_run_gc(snapshot->o.mem);

	snapshot->ref_table = g_hash_table_new(g_direct_hash, g_direct_equal);
	if (!func(snapshot, data))
		goto finalize;

	retval = mem->allocator->restore_snapshot(mem->data,
						  snapshot->snapshot,
						  snapshot->ref_table);
	if (retval) {
		snapshot->snapshot = NULL;
	}
  finalize:
	g_hash_table_destroy(snapshot->ref_table);
	snapshot->ref_table = NULL;

  retry2:
	old_val = g_atomic_int_get(&is_running);
	if (!g_atomic_int_compare_and_exchange(&is_running, old_val, old_val - 1))
		goto retry2;

	return retval;
}
Example #4
void
test_atomic (void)
{
  gint i;
  gint atomic = -5;
  gpointer atomic_pointer = NULL;
  gpointer biggest_pointer = (gpointer)((gsize)atomic_pointer - 1);

  for (i = 0; i < 15; i++)
    g_atomic_int_inc (&atomic);
  cut_assert_equal_int (10, atomic);
  for (i = 0; i < 9; i++)
    cut_assert (!g_atomic_int_dec_and_test (&atomic));
  cut_assert (g_atomic_int_dec_and_test (&atomic));
  cut_assert_equal_int (0, atomic);

  cut_assert_equal_int (0, g_atomic_int_exchange_and_add (&atomic, 5));
  cut_assert_equal_int (5, atomic);

  cut_assert_equal_int (5, g_atomic_int_exchange_and_add (&atomic, -10));
  cut_assert_equal_int (-5, atomic);

  g_atomic_int_add (&atomic, 20);
  cut_assert_equal_int (15, atomic);

  g_atomic_int_add (&atomic, -35);
  cut_assert_equal_int (-20, atomic);

  cut_assert_equal_int (atomic, g_atomic_int_get (&atomic));

  cut_assert (g_atomic_int_compare_and_exchange (&atomic, -20, 20));
  cut_assert_equal_int (20, atomic);
  
  cut_assert (!g_atomic_int_compare_and_exchange (&atomic, 42, 12));
  cut_assert_equal_int (20, atomic);
  
  cut_assert (g_atomic_int_compare_and_exchange (&atomic, 20, G_MAXINT));
  cut_assert_equal_int (G_MAXINT, atomic);

  cut_assert (g_atomic_int_compare_and_exchange (&atomic, G_MAXINT, G_MININT));
  cut_assert_equal_int (G_MININT, atomic);

  cut_assert (g_atomic_pointer_compare_and_exchange (&atomic_pointer, 
						     NULL, biggest_pointer));
  cut_assert_equal_pointer (biggest_pointer, atomic_pointer);

  cut_assert_equal_pointer (atomic_pointer, g_atomic_pointer_get (&atomic_pointer));

  cut_assert (g_atomic_pointer_compare_and_exchange (&atomic_pointer, 
						     biggest_pointer, NULL));
  cut_assert_equal_pointer (NULL, atomic_pointer);
}
Example #5
/**
 * g_atomic_int_compare_and_exchange:
 * @atomic: a pointer to a #gint or #guint
 * @oldval: the value to compare with
 * @newval: the value to conditionally replace with
 *
 * Compares @atomic to @oldval and, if equal, sets it to @newval.
 * If @atomic was not equal to @oldval then no change occurs.
 *
 * This compare and exchange is done atomically.
 *
 * Think of this operation as an atomic version of
 * <literal>{ if (*@atomic == @oldval) { *@atomic = @newval; return TRUE; } else return FALSE; }</literal>
 *
 * This call acts as a full compiler and hardware memory barrier.
 *
 * Returns: %TRUE if the exchange took place
 *
 * Since: 2.4
 **/
gboolean
(g_atomic_int_compare_and_exchange) (volatile gint *atomic,
                                     gint           oldval,
                                     gint           newval)
{
  return g_atomic_int_compare_and_exchange (atomic, oldval, newval);
}
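The doc comment above spells out the compare-and-exchange contract; most examples on this page wrap it in a read-retry loop. A minimal sketch of that idiom (illustrative only, not from any quoted project) implementing a capped counter:

#include <glib.h>

/* Illustrative sketch: atomically increment *counter, but never past
 * `limit`. Returns TRUE if the increment happened. */
static gboolean
bounded_inc (volatile gint *counter, gint limit)
{
  gint old;

  do {
    old = g_atomic_int_get (counter);
    if (old >= limit)
      return FALSE;             /* already at the cap */
  } while (!g_atomic_int_compare_and_exchange (counter, old, old + 1));

  return TRUE;                  /* we won the race for old -> old + 1 */
}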
Example #6
/**
 * gst_mini_object_unlock:
 * @object: the mini-object to unlock
 * @flags: #GstLockFlags
 *
 * Unlock the mini-object with the specified access mode in @flags.
 */
void
gst_mini_object_unlock (GstMiniObject * object, GstLockFlags flags)
{
  gint access_mode, state, newstate;

  g_return_if_fail (object != NULL);
  g_return_if_fail (GST_MINI_OBJECT_IS_LOCKABLE (object));

  do {
    access_mode = flags & FLAG_MASK;
    newstate = state = g_atomic_int_get (&object->lockstate);

    GST_CAT_TRACE (GST_CAT_LOCKING, "unlock %p: state %08x, access_mode %d",
        object, state, access_mode);

    if (access_mode & GST_LOCK_FLAG_EXCLUSIVE) {
      /* shared counter */
      g_return_if_fail (state >= SHARE_ONE);
      newstate -= SHARE_ONE;
      access_mode &= ~GST_LOCK_FLAG_EXCLUSIVE;
    }

    if (access_mode) {
      g_return_if_fail ((state & access_mode) == access_mode);
      /* decrease the refcount */
      newstate -= LOCK_ONE;
      /* last refcount, unset access_mode */
      if ((newstate & LOCK_FLAG_MASK) == access_mode)
        newstate &= ~LOCK_FLAG_MASK;
    }
  } while (!g_atomic_int_compare_and_exchange (&object->lockstate, state,
          newstate));
}
Example #7
void
rspamd_map_remove_all (struct rspamd_config *cfg)
{
	struct rspamd_map *map;
	GList *cur;
	struct rspamd_map_backend *bk;
	guint i;

	for (cur = cfg->maps; cur != NULL; cur = g_list_next (cur)) {
		map = cur->data;

		for (i = 0; i < map->backends->len; i ++) {
			bk = g_ptr_array_index (map->backends, i);
			MAP_RELEASE (bk, "rspamd_map_backend");
		}

		if (g_atomic_int_compare_and_exchange (&map->cache->available, 1, 0)) {
			unlink (map->cache->shmem_name);
		}

		if (map->dtor) {
			map->dtor (map->dtor_data);
		}
	}

	g_list_free (cfg->maps);
	cfg->maps = NULL;
}
Example #8
int trace_record_start(TraceBufferRecord *rec, TraceEventID event, size_t datasize)
{
    unsigned int idx, rec_off, old_idx, new_idx;
    uint32_t rec_len = sizeof(TraceRecord) + datasize;
    uint64_t event_u64 = event;
    uint64_t timestamp_ns = get_clock();

    do {
        old_idx = g_atomic_int_get(&trace_idx);
        smp_rmb();
        new_idx = old_idx + rec_len;

        if (new_idx - writeout_idx > TRACE_BUF_LEN) {
            /* Trace buffer full, event dropped! */
            g_atomic_int_inc(&dropped_events);
            return -ENOSPC;
        }
    } while (!g_atomic_int_compare_and_exchange(&trace_idx, old_idx, new_idx));

    idx = old_idx % TRACE_BUF_LEN;

    rec_off = idx;
    rec_off = write_to_buffer(rec_off, &event_u64, sizeof(event_u64));
    rec_off = write_to_buffer(rec_off, &timestamp_ns, sizeof(timestamp_ns));
    rec_off = write_to_buffer(rec_off, &rec_len, sizeof(rec_len));
    rec_off = write_to_buffer(rec_off, &trace_pid, sizeof(trace_pid));

    rec->tbuf_idx = idx;
    rec->rec_off  = (idx + sizeof(TraceRecord)) % TRACE_BUF_LEN;
    return 0;
}
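The CAS loop above reserves rec_len bytes by advancing trace_idx; once it succeeds, the writes below it target the privately owned [old_idx, new_idx) window. Here is the reservation step in isolation, as a hedged sketch with a plain byte counter (not QEMU code):

/* Sketch of the reservation idiom: claim `len` bytes of a shared index,
 * retrying if another thread moved it first. */
static volatile gint write_idx = 0;

static gint
reserve_bytes (gint len)
{
  gint old_idx, new_idx;

  do {
    old_idx = g_atomic_int_get (&write_idx);
    new_idx = old_idx + len;
  } while (!g_atomic_int_compare_and_exchange (&write_idx, old_idx, new_idx));

  return old_idx;   /* start of the region this thread now owns */
}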
Example #9
/* Start watching event for all maps */
void
rspamd_map_watch (struct rspamd_config *cfg,
		struct event_base *ev_base,
		struct rspamd_dns_resolver *resolver)
{
	GList *cur = cfg->maps;
	struct rspamd_map *map;

	/* First of all do synced read of data */
	while (cur) {
		map = cur->data;
		map->ev_base = ev_base;
		map->r = resolver;

		if (!g_atomic_int_compare_and_exchange (map->locked, 0, 1)) {
			msg_debug_map (
					"don't try to reread map as it is locked by other process, "
					"will reread it later");
			rspamd_map_schedule_periodic (map, TRUE, TRUE, FALSE);
		}
		else {
			rspamd_map_schedule_periodic (map, FALSE, TRUE, FALSE);
		}

		cur = g_list_next (cur);
	}
}
Example #10
/*****************************************************************************
 * get_new_midi_event()
 *****************************************************************************/
volatile MIDI_EVENT *
get_new_midi_event(unsigned char queue_num)
{
	volatile MIDI_EVENT *new_event;
	guint               new_bulk_index;
	guint               old_bulk_index;

	do {
		old_bulk_index = (guint)(bulk_event_index[queue_num]);
		new_bulk_index = (old_bulk_index + 1) & MIDI_EVENT_POOL_MASK;
	} while (!g_atomic_int_compare_and_exchange(&(bulk_event_index[queue_num]),
	                                            (gint)old_bulk_index,
	                                            (gint)new_bulk_index));
	new_event = &(bulk_event_pool[queue_num][old_bulk_index]);

	new_event->next    = NULL;
	new_event->type    = MIDI_EVENT_NO_EVENT;
	new_event->channel = 0x0;
	new_event->byte2   = 0x0;
	new_event->byte3   = 0x0;
	new_event->bytes   = 0;
	new_event->data[0] = 0xF7;
	new_event->state   = EVENT_STATE_ALLOCATED;

	return new_event;
}
Example #11
/* close() is allowed from any thread */
void
hio_server_close(HioServer  *hio_server)
{
    /* Be sure only one thread closes. Doing it this way instead of
     * with a lock means that when hio_server_close() returns,
     * the server may not quite be closed yet, but that is hopefully OK.
     */
    int fd = g_atomic_int_get(&hio_server->fd);
    if (fd < 0)
        return;

    if (!g_atomic_int_compare_and_exchange(&hio_server->fd,
                                           fd, -1))
        return;

    /* the closing thread must also remove the watch */
    g_assert(hio_server->on_new_connections_id != 0);

    g_source_remove(hio_server->on_new_connections_id);
    hio_server->on_new_connections_id = 0;

    shutdown(fd, SHUT_RDWR);
    close(fd);

    g_signal_emit(G_OBJECT(hio_server),
                  signals[CLOSED],
                  0);
}
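The comment in this function describes a claim-then-act idiom: whichever thread swings fd from a valid descriptor to -1 owns the teardown, with no mutex needed. The guard reduced to its core, as an illustrative sketch (claim_fd() is a made-up helper, not part of the quoted code):

/* Sketch of the claim-then-act guard from hio_server_close(): only the
 * thread that wins the CAS gets the descriptor back and may close it. */
static gboolean
claim_fd (volatile gint *fd_slot, gint *out_fd)
{
  gint fd = g_atomic_int_get (fd_slot);

  if (fd < 0)
    return FALSE;                 /* already closed */
  if (!g_atomic_int_compare_and_exchange (fd_slot, fd, -1))
    return FALSE;                 /* another thread won the race */

  *out_fd = fd;                   /* caller now owns the teardown */
  return TRUE;
}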
Example #12
void ScLock::Lock()
{
  while (g_atomic_int_compare_and_exchange(&m_locked, 0, 1) == FALSE)
  {
    std::this_thread::sleep_for(std::chrono::microseconds(1));
  }
}
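This C++ method spins on the CAS until it flips m_locked from 0 to 1; the matching Unlock() is not shown in the excerpt. The same spinlock pair in plain C/GLib, as a hedged sketch (the unlock as a plain atomic store is an assumption about ScLock, not quoted code):

static volatile gint locked = 0;

/* Spin until we atomically flip 0 -> 1, sleeping briefly on contention. */
static void
spin_lock (void)
{
  while (!g_atomic_int_compare_and_exchange (&locked, 0, 1))
    g_usleep (1);
}

/* Release: g_atomic_int_set() acts as a full barrier, so a plain
 * store back to 0 is sufficient. */
static void
spin_unlock (void)
{
  g_atomic_int_set (&locked, 0);
}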
Example #13
/**
 * iris_rrobin_append:
 * @rrobin: An #IrisRRobin
 * @data: a gpointer to callback data
 *
 * Appends a new data item to the round-robin structure. The data supplied
 * will be added to the arguments of the callback used in iris_rrobin_apply().
 *
 * Return value: %TRUE if there was enough free-space to append the item.
 */
gboolean
iris_rrobin_append (IrisRRobin *rrobin,
                    gpointer    data)
{
	gint count;
	gint i;

	g_return_val_if_fail (rrobin != NULL, FALSE);

_try_append:

	count = g_atomic_int_get (&rrobin->count);

	/* check we are not at capacity */
	if (count + 1 > rrobin->size)
		return FALSE;

	if (!g_atomic_int_compare_and_exchange (&rrobin->count, count, count + 1))
		goto _try_append;

	/* try to find a location to add to from the beginning */
	for (i = 0; i < rrobin->size; i++)
		if (g_atomic_pointer_compare_and_exchange (&rrobin->data [i], NULL, data))
			break;

	return TRUE;
}
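Per the doc comment, the append fails once count reaches size, so callers must check the return value. An illustrative usage sketch (iris_rrobin_new() and its capacity argument are assumptions about the Iris API, not shown in this excerpt):

/* Illustrative usage; iris_rrobin_new() is assumed to take a slot count,
 * and the data pointer must be non-NULL for the slot CAS to work. */
IrisRRobin *rrobin = iris_rrobin_new (4);
gpointer    worker_data = GINT_TO_POINTER (1);

if (!iris_rrobin_append (rrobin, worker_data))
  g_warning ("round-robin is full, item was not added");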
Example #14
gboolean
g_vfs_afp_connection_close_sync (GVfsAfpConnection *afp_connection,
                                 GCancellable      *cancellable,
                                 GError            **error)
{
  GVfsAfpConnectionPrivate *priv = afp_connection->priv;

  SyncData close_data;

  /* Take lock */
  g_mutex_lock (&priv->mutex);

  if (!check_open (afp_connection, error)) {
    g_mutex_unlock (&priv->mutex);
    return FALSE;
  }

  sync_data_init (&close_data, afp_connection, error);
  priv->pending_closes = g_slist_prepend (priv->pending_closes, &close_data);

  /* Release lock */
  g_mutex_unlock (&priv->mutex);

  if (g_atomic_int_compare_and_exchange (&priv->atomic_state, STATE_CONNECTED, STATE_PENDING_CLOSE))
    g_cancellable_cancel (priv->read_cancellable);
  
  sync_data_wait (&close_data);

  return close_data.res;
}
Example #15
static void
notify (gpointer p)
{
  if (!g_atomic_int_compare_and_exchange (&freed, 0, 1))
    {
      g_error ("someone already freed it after %u iterations", i);
    }
}
Example #16
File: authsrv.c Project: regit/nufw
/**
 * This is the exit() handler, used on a fatal error of NuAuth.
 * nuauth_cleanup() also calls it, but that call is ignored,
 * because nuauth_cleanup() sets nuauth_running to 0.
 */
void nuauth_atexit()
{
	if (g_atomic_int_compare_and_exchange(&nuauth_running, 1, 0)) {
		log_message(FATAL, DEBUG_AREA_MAIN,
			    "[+] Stopping NuAuth server (exit)");
		nuauth_deinit(FALSE);
	}
}
Example #17
gboolean
gkr_operation_set_result (GkrOperation *op, MateKeyringResult res)
{
	g_assert (op);
	g_assert ((int) res != INCOMPLETE);
	g_atomic_int_compare_and_exchange (&op->result, INCOMPLETE, res);
	return g_atomic_int_get (&op->result) == res; /* Success when already set to res */
}
Example #18
/**
 * gst_mini_object_lock:
 * @object: the mini-object to lock
 * @flags: #GstLockFlags
 *
 * Lock the mini-object with the specified access mode in @flags.
 *
 * Returns: %TRUE if @object could be locked.
 */
gboolean
gst_mini_object_lock (GstMiniObject * object, GstLockFlags flags)
{
  gint access_mode, state, newstate;

  g_return_val_if_fail (object != NULL, FALSE);
  g_return_val_if_fail (GST_MINI_OBJECT_IS_LOCKABLE (object), FALSE);

  if (G_UNLIKELY (object->flags & GST_MINI_OBJECT_FLAG_LOCK_READONLY &&
          flags & GST_LOCK_FLAG_WRITE))
    return FALSE;

  do {
    access_mode = flags & FLAG_MASK;
    newstate = state = g_atomic_int_get (&object->lockstate);

    GST_CAT_TRACE (GST_CAT_LOCKING, "lock %p: state %08x, access_mode %d",
        object, state, access_mode);

    if (access_mode & GST_LOCK_FLAG_EXCLUSIVE) {
      /* shared ref */
      newstate += SHARE_ONE;
      access_mode &= ~GST_LOCK_FLAG_EXCLUSIVE;
    }

    /* shared counter > 1 and write access is not allowed */
    if (((state & GST_LOCK_FLAG_WRITE) != 0
            || (access_mode & GST_LOCK_FLAG_WRITE) != 0)
        && IS_SHARED (newstate))
      goto lock_failed;

    if (access_mode) {
      if ((state & LOCK_FLAG_MASK) == 0) {
        /* nothing mapped, set access_mode */
        newstate |= access_mode;
      } else {
        /* access_mode must match */
        if ((state & access_mode) != access_mode)
          goto lock_failed;
      }
      /* increase refcount */
      newstate += LOCK_ONE;
    }
  } while (!g_atomic_int_compare_and_exchange (&object->lockstate, state,
          newstate));

  return TRUE;

lock_failed:
  {
    GST_CAT_DEBUG (GST_CAT_LOCKING,
        "lock failed %p: state %08x, access_mode %d", object, state,
        access_mode);
    return FALSE;
  }
}
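This lock pairs with the unlock shown in Example #6; a typical call site takes the lock, touches the object, and releases with the same flags. A minimal usage sketch (obj is an assumed pointer to a lockable GstMiniObject):

/* Sketch: read-lock a lockable mini-object, do work, unlock with the
 * same flags. `obj` is an assumed GstMiniObject pointer. */
if (gst_mini_object_lock (obj, GST_LOCK_FLAG_READ)) {
  /* ... read-only access to the object's payload ... */
  gst_mini_object_unlock (obj, GST_LOCK_FLAG_READ);
}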
Example #19
/** Set a value using g_atomic_int_compare_and_exchange(). */
static inline gint set_old(gint *atomic, gint newval) {
    gint oldval;
    while(TRUE) {
	oldval = g_atomic_int_get(atomic);
	if (g_atomic_int_compare_and_exchange(atomic, oldval, newval))
	    break;
	g_atomic_int_inc(&atomic_miss);
    }
    return(newval);
}
Example #20
/**
 * swap the server connection with a connection from
 * the connection pool
 *
 * we can only switch backends if we have an authenticated connection in the pool.
 *
 * @return NULL if swapping failed
 *         the new backend on success
 */
network_socket *network_connection_pool_lua_swap(network_mysqld_con *con, int backend_ndx, GHashTable *pwd_table) {
	network_backend_t *backend = NULL;
	network_socket *send_sock;
	network_mysqld_con_lua_t *st = con->plugin_con_state;
//	GString empty_username = { "", 0, 0 };

	/*
	 * we can only change to another backend if the backend is already
	 * in the connection pool and connected
	 */

	backend = network_backends_get(con->srv->backends, backend_ndx);
	if (!backend) return NULL;


	/**
	 * get a connection from the pool which matches our basic requirements
	 * - username has to match
	 * - default_db should match
	 */
		
#ifdef DEBUG_CONN_POOL
	g_debug("%s: (swap) check if we have a connection for this user in the pool '%s'", G_STRLOC, con->client->response ? con->client->response->username->str: "empty_user");
#endif
	int flag = 0;
	network_connection_pool* pool = chassis_event_thread_pool(backend);
	if (NULL == (send_sock = network_connection_pool_get(pool))) {
		/**
		 * no connections in the pool
		 */
		flag = 1;
		if (NULL == (send_sock = self_connect(con, backend, pwd_table))) {
			st->backend_ndx = -1;
			return NULL;
		}
	}

	/* the backend is up and cool, take and move the current backend into the pool */
#ifdef DEBUG_CONN_POOL
	g_debug("%s: (swap) added the previous connection to the pool", G_STRLOC);
#endif
//	network_connection_pool_lua_add_connection(con);

	/* connect to the new backend */
	st->backend = backend;
//	st->backend->connected_clients++;
	st->backend_ndx = backend_ndx;
    
	if (flag == 0 && !g_atomic_int_compare_and_exchange(&st->backend->connected_clients, 0, 0)) {
		g_atomic_int_dec_and_test(&st->backend->connected_clients);
		//g_critical("pool_lua_swap:%08x's connected_clients is %d\n", backend,  backend->connected_clients);
	}

	return send_sock;
}
Example #21
static void
rspamd_log_write_ringbuffer (rspamd_logger_t *rspamd_log,
		const gchar *module, const gchar *id,
		const gchar *data, glong len)
{
	guint32 row_num;
	struct rspamd_logger_error_log *elog;
	struct rspamd_logger_error_elt *elt;

	if (!rspamd_log->errlog) {
		return;
	}

	elog = rspamd_log->errlog;

	g_atomic_int_compare_and_exchange (&elog->cur_row, elog->max_elts, 0);
#if ((GLIB_MAJOR_VERSION == 2) && (GLIB_MINOR_VERSION > 30))
	row_num = g_atomic_int_add (&elog->cur_row, 1);
#else
	row_num = g_atomic_int_exchange_and_add (&elog->cur_row, 1);
#endif

	if (row_num < elog->max_elts) {
		elt = (struct rspamd_logger_error_elt *)(((guchar *)elog->elts) +
				(sizeof (*elt) + elog->elt_len) * row_num);
		g_atomic_int_set (&elt->completed, 0);
	}
	else {
		/* Race condition */
		elog->cur_row = 0;
		return;
	}

	elt->pid = rspamd_log->pid;
	elt->ptype = rspamd_log->process_type;
	elt->ts = rspamd_get_calendar_ticks ();

	if (id) {
		rspamd_strlcpy (elt->id, id, sizeof (elt->id));
	}
	else {
		rspamd_strlcpy (elt->id, "", sizeof (elt->id));
	}

	if (module) {
		rspamd_strlcpy (elt->module, module, sizeof (elt->module));
	}
	else {
		rspamd_strlcpy (elt->module, "", sizeof (elt->module));
	}

	rspamd_strlcpy (elt->message, data, MIN (len + 1, elog->elt_len));
	g_atomic_int_set (&elt->completed, 1);
}
Example #22
void
oio_log_lazy_init (void)
{
	static volatile guint lazy_init = 1;
	if (lazy_init) {
		if (g_atomic_int_compare_and_exchange(&lazy_init, 1, 0)) {
			g_log_set_default_handler(oio_log_noop, NULL);
			oio_log_init_level(GRID_LOGLVL_ERROR);
		}
	}
}
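The CAS here turns a static flag into a run-once guard. GLib ships the same pattern as g_once_init_enter()/g_once_init_leave(); a hedged equivalent of the function above using that API:

/* Sketch: the same one-shot initialization via GLib's g_once helpers. */
void
oio_log_lazy_init_once (void)
{
	static gsize initialized = 0;

	/* enter succeeds for exactly one thread; the rest wait until leave */
	if (g_once_init_enter (&initialized)) {
		g_log_set_default_handler (oio_log_noop, NULL);
		oio_log_init_level (GRID_LOGLVL_ERROR);
		g_once_init_leave (&initialized, 1);
	}
}

One design difference: with the CAS version, threads that lose the race return immediately, possibly before initialization finishes; threads that lose inside g_once_init_enter() block until it has completed.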
Example #23
WARN_UNUSED gboolean
ipc_endpoint_incref(ipc_endpoint_t *ipc)
{
    /* Prevents incref/decref race */
    int old;
    do {
        old = g_atomic_int_get(&ipc->refcount);
        if (old < 1)
            return FALSE;
    } while (!g_atomic_int_compare_and_exchange(&ipc->refcount, old, old+1));
    return TRUE;
}
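The loop refuses to resurrect an endpoint whose count already reached zero, which is why a plain g_atomic_int_inc() would not be safe here. The matching decrement is typically unconditional; a hypothetical sketch (ipc_endpoint_free() is an assumed destructor name, not shown in this excerpt):

/* Hypothetical counterpart: drop a reference, free on the last one.
 * ipc_endpoint_free() is an assumed name for illustration. */
void
ipc_endpoint_decref (ipc_endpoint_t *ipc)
{
    if (g_atomic_int_dec_and_test (&ipc->refcount))
        ipc_endpoint_free (ipc);
}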
Example #24
static void
_flush_start (GstAggregator * self, GstAggregatorPad * aggpad, GstEvent * event)
{
  GstBuffer *tmpbuf;
  GstAggregatorPrivate *priv = self->priv;
  GstAggregatorPadPrivate *padpriv = aggpad->priv;

  g_atomic_int_set (&aggpad->priv->flushing, TRUE);
  /*  Remove pad buffer and wake up the streaming thread */
  tmpbuf = gst_aggregator_pad_steal_buffer (aggpad);
  gst_buffer_replace (&tmpbuf, NULL);
  PAD_STREAM_LOCK (aggpad);
  if (g_atomic_int_compare_and_exchange (&padpriv->pending_flush_start,
          TRUE, FALSE) == TRUE) {
    GST_DEBUG_OBJECT (aggpad, "Expecting FLUSH_STOP now");
    g_atomic_int_set (&padpriv->pending_flush_stop, TRUE);
  }

  if (g_atomic_int_get (&priv->flush_seeking)) {
    /* If flush_seeking we forward the first FLUSH_START */
    if (g_atomic_int_compare_and_exchange (&priv->pending_flush_start,
            TRUE, FALSE) == TRUE) {

      GST_INFO_OBJECT (self, "Flushing, pausing srcpad task");
      _stop_srcpad_task (self, event);
      priv->flow_return = GST_FLOW_OK;

      GST_INFO_OBJECT (self, "Getting STREAM_LOCK while seeking");
      GST_PAD_STREAM_LOCK (self->srcpad);
      GST_LOG_OBJECT (self, "GOT STREAM_LOCK");
      event = NULL;
    }
  } else {
    gst_event_unref (event);
  }
  PAD_STREAM_UNLOCK (aggpad);

  tmpbuf = gst_aggregator_pad_steal_buffer (aggpad);
  gst_buffer_replace (&tmpbuf, NULL);
}
Example #25
static inline gboolean
gst_ts_cache_rollback (GstTSCache * cache, Slot * slot)
{
  gboolean rollback;
  rollback = g_atomic_int_compare_and_exchange (&slot->state, STATE_RECYCLE,
      STATE_FULL);
  if (rollback) {
    g_atomic_int_inc (&cache->fslots);
  } else if (g_atomic_int_get (&slot->state) == STATE_FULL) {
    rollback = TRUE;
  }
  return rollback;
}
Example #26
static inline gboolean
gst_ts_cache_rollforward (GstTSCache * cache, Slot * slot)
{
  gboolean rollforward;
  rollforward = g_atomic_int_compare_and_exchange (&slot->state, STATE_FULL,
      STATE_RECYCLE);
  if (rollforward) {
    g_atomic_int_add (&cache->fslots, -1);
  } else if (g_atomic_int_get (&slot->state) == STATE_RECYCLE) {
    rollforward = TRUE;
  }
  return rollforward;
}
Example #27
GstBuffer *
gst_ts_cache_pop (GstTSCache * cache, gboolean drain)
{
  GstBuffer *buffer = NULL;
  Slot *head;
  gboolean pop;

#if DEBUG_RINGBUFFER
  dump_cache_state (cache, "pre-pop");
#endif

  head = &cache->slots[cache->head];

  if (drain) {
    if (g_atomic_int_compare_and_exchange (&head->state, STATE_PART,
            STATE_FULL)) {
      cache->h_rb_offset = GST_BUFFER_OFFSET (head->buffer) + head->size;
      g_atomic_int_inc (&cache->fslots);
    }
  }

  pop = g_atomic_int_compare_and_exchange (&head->state, STATE_FULL, STATE_POP);

  if (pop) {
    g_atomic_int_add (&cache->fslots, -1);
    buffer = gst_slot_buffer_new (cache, head);
    if (cache->need_discont) {
      GST_BUFFER_FLAG_SET (buffer, GST_BUFFER_FLAG_DISCONT);
      cache->need_discont = FALSE;
    }

    cache->head = (cache->head + 1) % cache->nslots;
  }
#if DEBUG_RINGBUFFER
  dump_cache_state (cache, "post-pop");
#endif

  return buffer;
}
Example #28
int janus_text2pcap_close(janus_text2pcap *instance) {
	if(instance == NULL)
		return -1;
	janus_mutex_lock_nodebug(&instance->mutex);
	if(!g_atomic_int_compare_and_exchange(&instance->writable, 1, 0)) {
		janus_mutex_unlock_nodebug(&instance->mutex);
		return 0;
	}
	fclose(instance->file);
	instance->file = NULL;
	janus_mutex_unlock_nodebug(&instance->mutex);
	return 0;
}
Example #29
static inline gboolean
gst_ts_cache_recycle (GstTSCache * cache, Slot * slot)
{
  gboolean recycle;
  recycle = g_atomic_int_compare_and_exchange (&slot->state, STATE_RECYCLE,
      STATE_EMPTY);
  if (recycle) {
    if (GST_BUFFER_OFFSET (slot->buffer) != INVALID_OFFSET)
      cache->l_rb_offset = GST_BUFFER_OFFSET (slot->buffer) + slot->size;
    GST_BUFFER_OFFSET (slot->buffer) = INVALID_OFFSET;
    slot->size = 0;
  }
  return recycle;
}
Example #30
void janus_dtls_srtp_destroy(janus_dtls_srtp *dtls) {
	if(!dtls || !g_atomic_int_compare_and_exchange(&dtls->destroyed, 0, 1))
		return;
	dtls->ready = 0;
	dtls->retransmissions = 0;
#ifdef HAVE_SCTP
	/* Destroy the SCTP association if this is a DataChannel */
	if(dtls->sctp != NULL) {
		janus_sctp_association_destroy(dtls->sctp);
		janus_refcount_decrease(&dtls->sctp->ref);
		dtls->sctp = NULL;
	}
#endif
	janus_refcount_decrease(&dtls->ref);
}