void
test_atomic (void)
{
  gint i;
  gint atomic = -5;
  gpointer atomic_pointer = NULL;
  gpointer biggest_pointer = (gpointer)((gsize)atomic_pointer - 1);

  for (i = 0; i < 15; i++)
    g_atomic_int_inc (&atomic);
  cut_assert_equal_int (10, atomic);

  for (i = 0; i < 9; i++)
    cut_assert (!g_atomic_int_dec_and_test (&atomic));
  cut_assert (g_atomic_int_dec_and_test (&atomic));
  cut_assert_equal_int (0, atomic);

  cut_assert_equal_int (0, g_atomic_int_exchange_and_add (&atomic, 5));
  cut_assert_equal_int (5, atomic);
  cut_assert_equal_int (5, g_atomic_int_exchange_and_add (&atomic, -10));
  cut_assert_equal_int (-5, atomic);

  g_atomic_int_add (&atomic, 20);
  cut_assert_equal_int (15, atomic);
  g_atomic_int_add (&atomic, -35);
  cut_assert_equal_int (-20, atomic);

  cut_assert_equal_int (atomic, g_atomic_int_get (&atomic));

  cut_assert (g_atomic_int_compare_and_exchange (&atomic, -20, 20));
  cut_assert_equal_int (20, atomic);
  cut_assert (!g_atomic_int_compare_and_exchange (&atomic, 42, 12));
  cut_assert_equal_int (20, atomic);
  cut_assert (g_atomic_int_compare_and_exchange (&atomic, 20, G_MAXINT));
  cut_assert_equal_int (G_MAXINT, atomic);
  cut_assert (g_atomic_int_compare_and_exchange (&atomic, G_MAXINT, G_MININT));
  cut_assert_equal_int (G_MININT, atomic);

  cut_assert (g_atomic_pointer_compare_and_exchange (&atomic_pointer,
                                                     NULL, biggest_pointer));
  cut_assert_equal_pointer (biggest_pointer, atomic_pointer);
  cut_assert_equal_pointer (atomic_pointer,
                            g_atomic_pointer_get (&atomic_pointer));
  cut_assert (g_atomic_pointer_compare_and_exchange (&atomic_pointer,
                                                     biggest_pointer, NULL));
  cut_assert_equal_pointer (NULL, atomic_pointer);
}
/**
 * fm_init
 * @config: (allow-none): configuration file data
 *
 * Initializes libfm data. This API should always be called before any
 * other libfm function is called. It is idempotent.
 *
 * Returns: %FALSE in case of a duplicate call.
 *
 * Since: 0.1.0
 */
gboolean fm_init(FmConfig* config)
{
#if GLIB_CHECK_VERSION(2, 30, 0)
    if (g_atomic_int_add(&init_done, 1) != 0)
#else
    if (g_atomic_int_exchange_and_add(&init_done, 1) != 0)
#endif
        return FALSE; /* duplicate call */

#ifdef ENABLE_NLS
    bindtextdomain(GETTEXT_PACKAGE, PACKAGE_LOCALE_DIR);
    bind_textdomain_codeset(GETTEXT_PACKAGE, "UTF-8");
#endif

#if !GLIB_CHECK_VERSION(2, 36, 0)
    g_type_init();
#endif
#if !GLIB_CHECK_VERSION(2, 32, 0)
    g_thread_init(NULL);
#endif
    g_thread_pool_set_max_idle_time(10000); /* is 10 sec enough? */

    if(config)
        fm_config = (FmConfig*)g_object_ref(config);
    else
    {
        /* create default config object */
        fm_config = fm_config_new();
        fm_config_load_from_file(fm_config, NULL);
    }

#ifdef USE_UDISKS
    /* the extension point should be added before any other GIO monitor call,
       otherwise it will be ignored by GIO because GIO initializes it once */
    _fm_udisks_init();
#endif
    _fm_file_init();
    _fm_path_init();
    _fm_icon_init();
    _fm_monitor_init();
    _fm_mime_type_init();
    _fm_file_info_init(); /* should be called only after _fm_mime_type_init() */
    _fm_folder_init();
    _fm_archiver_init();
    _fm_thumbnailer_init(); // must be called after mime-types are initialized
    _fm_thumbnail_loader_init();
    _fm_terminal_init();
    /* should be called after config initialization */
    _fm_templates_init();
    _fm_folder_config_init();
#ifdef HAVE_ACTIONS /* generated by vala */
    _fm_file_actions_init();
#endif

    fm_qdata_id = g_quark_from_static_string("fm_qdata_id");
    return TRUE;
}
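A minimal sketch of the once-only guard fm_init() relies on, reduced to the atomic counter alone. The surrounding names (init_once and the setup placeholder) are illustrative, not libfm API, and GLib >= 2.30 is assumed so that g_atomic_int_add() returns the previous value:

#include <glib.h>

static gint init_done = 0;

static gboolean
init_once (void)
{
  /* only the caller that sees the old value 0 performs the setup */
  if (g_atomic_int_add (&init_done, 1) != 0)
    return FALSE; /* duplicate call */
  /* ... one-time setup would happen here ... */
  return TRUE;
}

int
main (void)
{
  g_assert (init_once () == TRUE);  /* first call does the work */
  g_assert (init_once () == FALSE); /* every later call is a no-op */
  return 0;
}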
GVfsFtpConnection *
g_vfs_ftp_connection_new (GSocketConnectable *addr,
                          GCancellable       *cancellable,
                          GError            **error)
{
  GVfsFtpConnection *conn;

  g_return_val_if_fail (G_IS_SOCKET_CONNECTABLE (addr), NULL);

  conn = g_slice_new0 (GVfsFtpConnection);
  conn->client = g_socket_client_new ();
  conn->debug_id = g_atomic_int_exchange_and_add (&debug_id, 1);
  conn->commands = G_IO_STREAM (g_socket_client_connect (conn->client,
                                                         addr,
                                                         cancellable,
                                                         error));
  if (conn->commands == NULL)
    {
      g_object_unref (conn->client);
      g_slice_free (GVfsFtpConnection, conn);
      return NULL;
    }

  enable_keepalive (G_SOCKET_CONNECTION (conn->commands));

  conn->commands_in = G_DATA_INPUT_STREAM (g_data_input_stream_new (
      g_io_stream_get_input_stream (conn->commands)));
  g_data_input_stream_set_newline_type (conn->commands_in,
                                        G_DATA_STREAM_NEWLINE_TYPE_CR_LF);

  /* The first thing that needs to happen is receiving the welcome message */
  conn->waiting_for_reply = TRUE;

  return conn;
}
MoonSurface *
MoonSurface::ref ()
{
  g_atomic_int_exchange_and_add (&refcount, 1);

  return this;
}
/**
 * pka_subscription_new:
 *
 * Creates a new instance of #PkaSubscription.
 *
 * Returns: the newly created instance.
 */
PkaSubscription*
pka_subscription_new (void)
{
	static gint id_seq = 0;
	PkaSubscription *subscription;

	#define INITIALIZE_TREE(_field, _free)              \
	    G_STMT_START {                                  \
	        subscription->_field = g_tree_new_full(     \
	            (GCompareDataFunc)g_int_compare,        \
	            subscription,                           \
	            (GDestroyNotify)g_free,                 \
	            (GDestroyNotify)_free);                 \
	    } G_STMT_END

	ENTRY;
	subscription = g_slice_new0(PkaSubscription);
	subscription->ref_count = 1;
	g_static_rw_lock_init(&subscription->rw_lock);
	subscription->id = g_atomic_int_exchange_and_add(&id_seq, 1);
	subscription->state = PKA_SUBSCRIPTION_MUTED;
	g_get_current_time(&subscription->created_at);
	INITIALIZE_TREE(channels, g_object_unref);
	INITIALIZE_TREE(sources, g_object_unref);
	INITIALIZE_TREE(manifests, pka_manifest_unref);
	RETURN(subscription);
}
static void
assert_singleton_execution3 (void)
{
  static volatile int seen_execution = 0;
  int old_seen_execution = g_atomic_int_exchange_and_add (&seen_execution, 1);

  if (old_seen_execution != 0)
    g_error ("%s: function executed more than once", G_STRFUNC);
}
void
MoonSurface::unref ()
{
  int v;

  v = g_atomic_int_exchange_and_add (&refcount, -1) - 1;

  if (v == 0)
    delete this;
}
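MoonSurface::ref()/unref() above depend on the fetch-and-add returning the counter's previous value. A minimal standalone sketch of that contract, written against g_atomic_int_add() (which since GLib 2.30 returns the old value, exactly like the deprecated g_atomic_int_exchange_and_add() it replaced):

#include <glib.h>

int
main (void)
{
  gint refcount = 1;

  /* "ref": the returned value is the count *before* the addition */
  g_assert (g_atomic_int_add (&refcount, 1) == 1);
  g_assert (g_atomic_int_get (&refcount) == 2);

  /* "unref": old value minus one is the new count, the same
   * arithmetic MoonSurface::unref() performs */
  g_assert (g_atomic_int_add (&refcount, -1) - 1 == 1);
  g_assert (g_atomic_int_add (&refcount, -1) - 1 == 0);
  /* the count hit zero here; a real unref would free the object now */

  return 0;
}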
/**
 * couchdb_struct_field_ref:
 * @sf: A #CouchdbStructField object
 *
 * Increments the reference count of a #CouchdbStructField object.
 *
 * Return value: A pointer to the referenced object.
 */
CouchdbStructField *
couchdb_struct_field_ref (CouchdbStructField *sf)
{
	g_return_val_if_fail (sf != NULL, NULL);
	g_return_val_if_fail (sf->ref_count > 0, NULL);

	g_atomic_int_exchange_and_add (&sf->ref_count, 1);

	return sf;
}
/**
 * json_object_ref:
 * @object: a #JsonObject
 *
 * Increases the reference count of a #JsonObject by one.
 *
 * Return value: the passed #JsonObject, with the reference count
 *   increased by one.
 */
JsonObject *
json_object_ref (JsonObject *object)
{
  g_return_val_if_fail (object != NULL, NULL);
  g_return_val_if_fail (object->ref_count > 0, NULL);

  g_atomic_int_exchange_and_add (&object->ref_count, 1);

  return object;
}
/**
 * couchdb_document_info_ref:
 * @doc_info: A #CouchdbDocumentInfo object
 *
 * Increments the reference count of the given #CouchdbDocumentInfo object.
 *
 * Return value: A pointer to the object being referenced.
 */
CouchdbDocumentInfo *
couchdb_document_info_ref (CouchdbDocumentInfo *doc_info)
{
	g_return_val_if_fail (doc_info != NULL, NULL);
	g_return_val_if_fail (doc_info->ref_count > 0, NULL);

	g_atomic_int_exchange_and_add (&doc_info->ref_count, 1);

	return doc_info;
}
/**
 * json_array_ref:
 * @array: a #JsonArray
 *
 * Increases the reference count of a #JsonArray by one.
 *
 * Return value: the passed #JsonArray, with the reference count
 *   increased by one.
 */
JsonArray *
json_array_ref (JsonArray *array)
{
  g_return_val_if_fail (array != NULL, NULL);
  g_return_val_if_fail (array->ref_count > 0, NULL);

  g_atomic_int_exchange_and_add (&array->ref_count, 1);

  return array;
}
/* the poll/select call is also performed on a control socket, that way
 * we can send special commands to control it */
static inline gboolean
raise_wakeup (GstPoll * set)
{
  gboolean result = TRUE;

  if (g_atomic_int_exchange_and_add (&set->control_pending, 1) == 0) {
    /* raise when nothing pending */
    result = WAKE_EVENT (set);
  }
  return result;
}
static void
rspamd_log_write_ringbuffer (rspamd_logger_t *rspamd_log,
		const gchar *module, const gchar *id,
		const gchar *data, glong len)
{
	guint32 row_num;
	struct rspamd_logger_error_log *elog;
	struct rspamd_logger_error_elt *elt;

	if (!rspamd_log->errlog) {
		return;
	}

	elog = rspamd_log->errlog;

	g_atomic_int_compare_and_exchange (&elog->cur_row, elog->max_elts, 0);
#if ((GLIB_MAJOR_VERSION == 2) && (GLIB_MINOR_VERSION > 30))
	row_num = g_atomic_int_add (&elog->cur_row, 1);
#else
	row_num = g_atomic_int_exchange_and_add (&elog->cur_row, 1);
#endif

	if (row_num < elog->max_elts) {
		elt = (struct rspamd_logger_error_elt *)(((guchar *)elog->elts) +
				(sizeof (*elt) + elog->elt_len) * row_num);
		g_atomic_int_set (&elt->completed, 0);
	}
	else {
		/* Race condition */
		elog->cur_row = 0;
		return;
	}

	elt->pid = rspamd_log->pid;
	elt->ptype = rspamd_log->process_type;
	elt->ts = rspamd_get_calendar_ticks ();

	if (id) {
		rspamd_strlcpy (elt->id, id, sizeof (elt->id));
	}
	else {
		rspamd_strlcpy (elt->id, "", sizeof (elt->id));
	}

	if (module) {
		rspamd_strlcpy (elt->module, module, sizeof (elt->module));
	}
	else {
		rspamd_strlcpy (elt->module, "", sizeof (elt->module));
	}

	rspamd_strlcpy (elt->message, data, MIN (len + 1, elog->elt_len));
	g_atomic_int_set (&elt->completed, 1);
}
/**
 * pka_context_new:
 *
 * Creates a new instance of #PkaContext.
 *
 * Returns: the newly created instance.
 */
PkaContext*
pka_context_new (void)
{
	PkaContext *context;

	ENTRY;
	context = g_slice_new0(PkaContext);
	context->ref_count = 1;
	context->id = g_atomic_int_exchange_and_add((gint *)&context_seq, 1);
	RETURN(context);
}
static guint
gst_video_overlay_get_seqnum (void)
{
  static gint seqnum;           /* 0 */

#if GLIB_CHECK_VERSION(2,29,5)
  return (guint) g_atomic_int_add (&seqnum, 1);
#else
  return (guint) g_atomic_int_exchange_and_add (&seqnum, 1);
#endif
}
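Several snippets in this collection repeat the same GLIB_CHECK_VERSION dance around the 2.30 API change. A small wrapper like the sketch below could centralize it; fetch_and_add() is a hypothetical helper, not a GLib symbol:

#include <glib.h>

/* Hypothetical helper: always returns the value held *before* the
 * addition, regardless of GLib version. */
static inline gint
fetch_and_add (gint *atomic, gint val)
{
#if GLIB_CHECK_VERSION (2, 30, 0)
  /* since 2.30, g_atomic_int_add() itself returns the old value */
  return g_atomic_int_add (atomic, val);
#else
  return g_atomic_int_exchange_and_add (atomic, val);
#endif
}

/* usage mirroring gst_video_overlay_get_seqnum() above */
static guint
next_seqnum (void)
{
  static gint seqnum = 0;
  return (guint) fetch_and_add (&seqnum, 1);
}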
static gpointer
tmain_call_initializer3 (gpointer user_data)
{
  g_mutex_lock (tmutex);
  g_cond_wait (tcond, tmutex);
  g_mutex_unlock (tmutex);
  //g_printf ("[");
  initializer3();
  //g_printf ("]\n");
  g_atomic_int_exchange_and_add (&thread_call_count, 1);
  return NULL;
}
/**
 * g_hash_table_unref:
 * @hash_table: a valid #GHashTable.
 *
 * Atomically decrements the reference count of @hash_table by one.
 * If the reference count drops to 0, all keys and values will be
 * destroyed, and all memory allocated by the hash table is released.
 * This function is MT-safe and may be called from any thread.
 *
 * Since: 2.10
 **/
void
g_hash_table_unref (GHashTable *hash_table)
{
  g_return_if_fail (hash_table != NULL);
  g_return_if_fail (hash_table->ref_count > 0);

  if (g_atomic_int_exchange_and_add (&hash_table->ref_count, -1) - 1 == 0)
    {
      g_hash_table_remove_all_nodes (hash_table, TRUE);
      g_free (hash_table->nodes);
      g_slice_free (GHashTable, hash_table);
    }
}
static GstPad *
gst_mxf_mux_request_new_pad (GstElement * element,
    GstPadTemplate * templ, const gchar * pad_name)
{
  GstMXFMux *mux = GST_MXF_MUX (element);
  GstMXFMuxPad *cpad;
  GstPad *pad = NULL;
  guint pad_number;
  gchar *name = NULL;
  const MXFEssenceElementWriter *writer;

  if (mux->state != GST_MXF_MUX_STATE_HEADER) {
    GST_WARNING_OBJECT (mux, "Can't request pads after writing header");
    return NULL;
  }

  writer = mxf_essence_element_writer_find (templ);
  if (!writer) {
    GST_ERROR_OBJECT (mux, "Not our template");
    return NULL;
  }

  pad_number = g_atomic_int_exchange_and_add ((gint *) & mux->n_pads, 1);
  name = gst_mxf_mux_create_pad_name (templ, pad_number);

  GST_DEBUG_OBJECT (mux, "Creating pad '%s'", name);
  pad = gst_pad_new_from_template (templ, name);
  g_free (name);
  cpad = (GstMXFMuxPad *)
      gst_collect_pads_add_pad (mux->collect, pad, sizeof (GstMXFMuxPad));
  cpad->last_timestamp = 0;
  cpad->adapter = gst_adapter_new ();
  cpad->writer = writer;

  /* FIXME: hacked way to override/extend the event function of
   * GstCollectPads; because it sets its own event function giving the
   * element no access to events. */
  mux->collect_event = (GstPadEventFunction) GST_PAD_EVENTFUNC (pad);
  gst_pad_set_event_function (pad,
      GST_DEBUG_FUNCPTR (gst_mxf_mux_handle_sink_event));

  gst_pad_set_setcaps_function (pad, gst_mxf_mux_setcaps);
  gst_pad_use_fixed_caps (pad);
  gst_pad_set_active (pad, TRUE);
  gst_element_add_pad (element, pad);

  return pad;
}
/**
 * g_hash_table_unref:
 * @hash_table: a valid #GHashTable.
 *
 * Atomically decrements the reference count of @hash_table by one.
 * If the reference count drops to 0, all keys and values will be
 * destroyed, and all memory allocated by the hash table is released.
 * This function is MT-safe and may be called from any thread.
 *
 * Since: 2.10
 **/
void
g_hash_table_unref (GHashTable *hash_table)
{
  g_return_if_fail (hash_table != NULL);
  g_return_if_fail (hash_table->ref_count > 0);

  if (g_atomic_int_exchange_and_add (&hash_table->ref_count, -1) - 1 == 0)
    {
      gint i;

      for (i = 0; i < hash_table->size; i++)
        g_hash_nodes_destroy (hash_table->nodes[i],
                              hash_table->key_destroy_func,
                              hash_table->value_destroy_func);
      g_free (hash_table->nodes);
      g_slice_free (GHashTable, hash_table);
    }
}
mrpc_status_t format_request(struct mrpc_connection *conn, unsigned cmd,
			void *data, struct mrpc_message **result)
{
	struct mrpc_message *msg;
	xdrproc_t type;
	mrpc_status_t ret;

	if (conn->set->protocol->sender_request_info(cmd, &type, NULL))
		return MINIRPC_ENCODING_ERR;
	ret=format_message(conn, type, data, &msg);
	if (ret)
		return ret;
	msg->hdr.sequence=g_atomic_int_exchange_and_add(&conn->next_sequence, 1);
	msg->hdr.status=MINIRPC_PENDING;
	msg->hdr.cmd=cmd;
	*result=msg;
	return MINIRPC_OK;
}
void __connman_tethering_set_enabled(void)
{
	int err;

	DBG("enabled %d", tethering_enabled + 1);

	if (g_atomic_int_exchange_and_add(&tethering_enabled, 1) == 0) {
		const char *dns;

		err = create_bridge(BRIDGE_NAME);
		if (err < 0)
			return;

		err = enable_bridge(BRIDGE_NAME);
		if (err < 0 && err != -EALREADY) {
			remove_bridge(BRIDGE_NAME);
			return;
		}

		dns = BRIDGE_IP;
		if (__connman_dnsproxy_add_listener(BRIDGE_NAME) < 0) {
			connman_error("Can't add listener %s to DNS proxy",
						BRIDGE_NAME);
			dns = BRIDGE_DNS;
		}

		tethering_dhcp_server = dhcp_server_start(BRIDGE_NAME,
						BRIDGE_IP, BRIDGE_SUBNET,
						BRIDGE_IP_START, BRIDGE_IP_END,
						24 * 3600, dns);
		if (tethering_dhcp_server == NULL) {
			disable_bridge(BRIDGE_NAME);
			remove_bridge(BRIDGE_NAME);
			return;
		}

		enable_nat(default_interface);

		DBG("tethering started");
	}
}
/* TYPICALLY CALLED FROM ANOTHER THREAD */
void
hio_output_stream_close(HioOutputStream *stream)
{
    if (g_atomic_int_exchange_and_add(&stream->closed, 1) == 0) {
        /* If we just went from 0 (not closed) to 1 (closed) and that
         * makes us done, we need to notify done-ness in the task
         * thread. If we still aren't done, we'll notify done-ness
         * once we flush, so we don't need to add a handler here.
         *
         * It's important not to add a watcher if we've already
         * notified done-ness, because we'll have unblocked completion
         * and you can't add watchers to completed tasks.
         */
        if (hio_output_stream_is_done(stream)) {
            hrt_task_add_immediate(stream->task,
                                   on_notify_done_after_close,
                                   g_object_ref(stream),
                                   g_object_unref);
        }
    }
}
/* call the above 1024 initializers */
static void*
stress_concurrent_initializers (void *user_data)
{
  static void (*initializers[]) (void) = {
    LIST_256_TEST_INITIALIZERS (stress1),
    LIST_256_TEST_INITIALIZERS (stress2),
    LIST_256_TEST_INITIALIZERS (stress3),
    LIST_256_TEST_INITIALIZERS (stress4),
  };
  int i;

  /* sync to main thread */
  g_mutex_lock (tmutex);
  g_mutex_unlock (tmutex);
  /* initialize concurrently */
  for (i = 0; i < G_N_ELEMENTS (initializers); i++)
    {
      initializers[i]();
      g_atomic_int_exchange_and_add (&thread_call_count, 1);
    }
  return NULL;
}
static inline gint
release_all_wakeup (GstPoll * set)
{
  gint old;

  while (TRUE) {
    if (!(old = g_atomic_int_get (&set->control_pending)))
      /* nothing pending, just exit */
      break;

    /* try to remove all pending control messages */
    if (g_atomic_int_compare_and_exchange (&set->control_pending, old, 0)) {
      /* we managed to remove all messages, read the control socket */
      if (RELEASE_EVENT (set))
        break;
      else
        /* retry again until we read it successfully */
        g_atomic_int_exchange_and_add (&set->control_pending, 1);
    }
  }
  return old;
}
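raise_wakeup() and release_all_wakeup() above cooperate through set->control_pending: the raiser writes to the control socket only on the 0 -> 1 transition, and the releaser swings the counter back to 0 with a compare-and-exchange, re-adding one pending message if the socket read fails. Below is a reduced, single-threaded sketch of that counter protocol with the socket I/O stubbed out; wake_event()/release_event() are assumptions standing in for WAKE_EVENT/RELEASE_EVENT, and GLib >= 2.30 is assumed for the value-returning g_atomic_int_add():

#include <glib.h>

static gint control_pending = 0;

/* stand-ins for the control-socket I/O; they always succeed here */
static gboolean wake_event (void)    { return TRUE; }
static gboolean release_event (void) { return TRUE; }

static gboolean
raise_wakeup (void)
{
  /* only the 0 -> 1 transition touches the socket */
  if (g_atomic_int_add (&control_pending, 1) == 0)
    return wake_event ();
  return TRUE;
}

static gint
release_all_wakeup (void)
{
  gint old;

  while ((old = g_atomic_int_get (&control_pending)) != 0) {
    /* try to claim all pending messages at once */
    if (g_atomic_int_compare_and_exchange (&control_pending, old, 0)) {
      if (release_event ())
        break;
      /* read failed: put one pending message back and retry */
      g_atomic_int_add (&control_pending, 1);
    }
  }
  return old;
}

int
main (void)
{
  raise_wakeup ();
  raise_wakeup ();                       /* second raise is only counted */
  g_assert (release_all_wakeup () == 2); /* both drained in one read */
  return 0;
}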
void __connman_notifier_connect(enum connman_service_type type)
{
	DBG("type %d", type);

	switch (type) {
	case CONNMAN_SERVICE_TYPE_UNKNOWN:
	case CONNMAN_SERVICE_TYPE_SYSTEM:
	case CONNMAN_SERVICE_TYPE_GPS:
	case CONNMAN_SERVICE_TYPE_VPN:
	case CONNMAN_SERVICE_TYPE_GADGET:
		return;
	case CONNMAN_SERVICE_TYPE_ETHERNET:
	case CONNMAN_SERVICE_TYPE_WIFI:
	case CONNMAN_SERVICE_TYPE_WIMAX:
	case CONNMAN_SERVICE_TYPE_BLUETOOTH:
	case CONNMAN_SERVICE_TYPE_CELLULAR:
		break;
	}

	if (g_atomic_int_exchange_and_add(&connected[type], 1) == 0)
		technology_connected(type, TRUE);
}
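The connman and HIO snippets here all use the old value returned by the atomic add as an edge detector: exactly one caller learns that it crossed the 0 -> 1 (or 1 -> 0) boundary and performs the one-time action. A condensed sketch of that idiom; on_first()/on_last() are illustrative names, and GLib >= 2.30 is assumed:

#include <glib.h>

static gint connected = 0;

static void on_first (void) { g_print ("came online\n");  }
static void on_last  (void) { g_print ("went offline\n"); }

static void
notify_connect (void)
{
  /* old value 0 means this caller made the 0 -> 1 transition */
  if (g_atomic_int_add (&connected, 1) == 0)
    on_first ();
}

static void
notify_disconnect (void)
{
  /* old value 1 means this caller brought the count back to 0 */
  if (g_atomic_int_add (&connected, -1) == 1)
    on_last ();
}

int
main (void)
{
  notify_connect ();
  notify_connect ();    /* no second "came online" */
  notify_disconnect ();
  notify_disconnect (); /* prints "went offline" exactly once */
  return 0;
}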
guint
gsl_vorbis_make_serialno (void)
{
  static guint global_ogg_serial = ('B' << 24) | ('S' << 16) | ('E' << 8) | 128;
  return g_atomic_int_exchange_and_add (&global_ogg_serial, 1);
}
static GstPad *
gst_interleave_request_new_pad (GstElement * element,
    GstPadTemplate * templ, const gchar * req_name)
{
  GstInterleave *self = GST_INTERLEAVE (element);
  GstPad *new_pad;
  gchar *pad_name;
  gint channels, padnumber;
  GValue val = { 0, };

  if (templ->direction != GST_PAD_SINK)
    goto not_sink_pad;

  channels = g_atomic_int_exchange_and_add (&self->channels, 1);
  padnumber = g_atomic_int_exchange_and_add (&self->padcounter, 1);

  pad_name = g_strdup_printf ("sink%d", padnumber);
  new_pad = GST_PAD_CAST (g_object_new (GST_TYPE_INTERLEAVE_PAD,
          "name", pad_name, "direction", templ->direction,
          "template", templ, NULL));
  GST_INTERLEAVE_PAD_CAST (new_pad)->channel = channels;
  GST_DEBUG_OBJECT (self, "requested new pad %s", pad_name);
  g_free (pad_name);

  gst_pad_set_setcaps_function (new_pad,
      GST_DEBUG_FUNCPTR (gst_interleave_sink_setcaps));
  gst_pad_set_getcaps_function (new_pad,
      GST_DEBUG_FUNCPTR (gst_interleave_sink_getcaps));
  gst_collect_pads_add_pad (self->collect, new_pad, sizeof (GstCollectData));

  /* FIXME: hacked way to override/extend the event function of
   * GstCollectPads; because it sets its own event function giving the
   * element no access to events */
  self->collect_event = (GstPadEventFunction) GST_PAD_EVENTFUNC (new_pad);
  gst_pad_set_event_function (new_pad,
      GST_DEBUG_FUNCPTR (gst_interleave_sink_event));

  if (!gst_element_add_pad (element, new_pad))
    goto could_not_add;

  g_value_init (&val, GST_TYPE_AUDIO_CHANNEL_POSITION);
  g_value_set_enum (&val, GST_AUDIO_CHANNEL_POSITION_NONE);
  self->input_channel_positions =
      g_value_array_append (self->input_channel_positions, &val);
  g_value_unset (&val);

  /* Update the src caps if we already have them */
  if (self->sinkcaps) {
    GstCaps *srccaps;
    GstStructure *s;

    /* Take lock to make sure processing finishes first */
    GST_OBJECT_LOCK (self->collect);

    srccaps = gst_caps_copy (self->sinkcaps);
    s = gst_caps_get_structure (srccaps, 0);

    gst_structure_set (s, "channels", G_TYPE_INT, self->channels, NULL);
    gst_interleave_set_channel_positions (self, s);

    gst_pad_set_caps (self->src, srccaps);
    gst_caps_unref (srccaps);

    GST_OBJECT_UNLOCK (self->collect);
  }

  return new_pad;

  /* errors */
not_sink_pad:
  {
    g_warning ("interleave: requested new pad that is not a SINK pad\n");
    return NULL;
  }
could_not_add:
  {
    GST_DEBUG_OBJECT (self, "could not add pad %s", GST_PAD_NAME (new_pad));
    gst_collect_pads_remove_pad (self->collect, new_pad);
    gst_object_unref (new_pad);
    return NULL;
  }
}
int
main (int argc, char *argv[])
{
  gint i;
  gint atomic = -5;
  gpointer atomic_pointer = NULL;
  gpointer biggest_pointer = (gpointer)((gsize)atomic_pointer - 1);

#ifdef SYMBIAN
  g_log_set_handler (NULL,
                     G_LOG_FLAG_FATAL | G_LOG_FLAG_RECURSION |
                     G_LOG_LEVEL_CRITICAL | G_LOG_LEVEL_WARNING |
                     G_LOG_LEVEL_MESSAGE | G_LOG_LEVEL_INFO |
                     G_LOG_LEVEL_DEBUG,
                     &mrtLogHandler, NULL);
  g_set_print_handler (mrtPrintHandler);
#endif /* SYMBIAN */

  for (i = 0; i < 15; i++)
    g_atomic_int_inc (&atomic);
  g_assert (atomic == 10);
  for (i = 0; i < 9; i++)
    g_assert (!g_atomic_int_dec_and_test (&atomic));
  g_assert (g_atomic_int_dec_and_test (&atomic));
  g_assert (atomic == 0);

  g_assert (g_atomic_int_exchange_and_add (&atomic, 5) == 0);
  g_assert (atomic == 5);
  g_assert (g_atomic_int_exchange_and_add (&atomic, -10) == 5);
  g_assert (atomic == -5);

  g_atomic_int_add (&atomic, 20);
  g_assert (atomic == 15);
  g_atomic_int_add (&atomic, -35);
  g_assert (atomic == -20);

  g_assert (atomic == g_atomic_int_get (&atomic));

  g_assert (g_atomic_int_compare_and_exchange (&atomic, -20, 20));
  g_assert (atomic == 20);
  g_assert (!g_atomic_int_compare_and_exchange (&atomic, 42, 12));
  g_assert (atomic == 20);
  g_assert (g_atomic_int_compare_and_exchange (&atomic, 20, G_MAXINT));
  g_assert (atomic == G_MAXINT);
  g_assert (g_atomic_int_compare_and_exchange (&atomic, G_MAXINT, G_MININT));
  g_assert (atomic == G_MININT);

  g_assert (g_atomic_pointer_compare_and_exchange (&atomic_pointer,
                                                   NULL, biggest_pointer));
  g_assert (atomic_pointer == biggest_pointer);
  g_assert (atomic_pointer == g_atomic_pointer_get (&atomic_pointer));
  g_assert (g_atomic_pointer_compare_and_exchange (&atomic_pointer,
                                                   biggest_pointer, NULL));
  g_assert (atomic_pointer == NULL);

#ifdef SYMBIAN
  testResultXml ("atomic-test");
#endif /* SYMBIAN */
  return 0;
}
static liHandlerResult mod_limit_action_handle(liVRequest *vr, gpointer param, gpointer *context) {
	gboolean limit_reached = FALSE;
	mod_limit_context *ctx = (mod_limit_context*) param;
	GPtrArray *arr = g_ptr_array_index(vr->plugin_ctx, ctx->plugin->id);
	gint cons;
	mod_limit_req_ip_data *rid;
	liSocketAddress *remote_addr = &vr->coninfo->remote_addr;
	gpointer addr;
	guint32 bits;

	UNUSED(context);

	if (li_vrequest_is_handled(vr)) {
		VR_DEBUG(vr, "%s", "mod_limit: already have a content handler - ignoring limits. Put limit.* before content handlers such as 'static', 'fastcgi' or 'proxy'");
		return LI_HANDLER_GO_ON;
	}

	/* IPv4 or IPv6? */
	switch (remote_addr->addr->plain.sa_family) {
	case AF_INET:
		addr = &remote_addr->addr->ipv4.sin_addr.s_addr;
		bits = 32;
		break;
	case AF_INET6:
		addr = &remote_addr->addr->ipv6.sin6_addr.s6_addr;
		bits = 128;
		break;
	default:
		if (ctx->type == ML_TYPE_CON_IP || ctx->type == ML_TYPE_REQ_IP) {
			VR_DEBUG(vr, "%s", "mod_limit only supports ipv4 or ipv6 clients");
			return LI_HANDLER_ERROR;
		}
		addr = NULL;
		bits = 0;
	}

	if (!arr) {
		/* request is not in any context yet, create new array */
		arr = g_ptr_array_sized_new(2);
		g_ptr_array_index(vr->plugin_ctx, ctx->plugin->id) = arr;
	}

	switch (ctx->type) {
	case ML_TYPE_CON:
#ifdef GLIB_VERSION_2_30
		/* since 2.30 g_atomic_int_add does the same as
		 * g_atomic_int_exchange_and_add; before, it didn't return the
		 * old value. this fixes the deprecation warning. */
		if (g_atomic_int_add(&ctx->pool.con, 1) > ctx->limit) {
			g_atomic_int_add(&ctx->pool.con, -1);
			limit_reached = TRUE;
			VR_DEBUG(vr, "limit.con: limit reached (%d active connections)", ctx->limit);
		}
#else
		if (g_atomic_int_exchange_and_add(&ctx->pool.con, 1) > ctx->limit) {
			g_atomic_int_add(&ctx->pool.con, -1);
			limit_reached = TRUE;
			VR_DEBUG(vr, "limit.con: limit reached (%d active connections)", ctx->limit);
		}
#endif
		break;
	case ML_TYPE_CON_IP:
		g_mutex_lock(ctx->mutex);
		cons = GPOINTER_TO_INT(li_radixtree_lookup_exact(ctx->pool.con_ip, addr, bits));
		if (cons < ctx->limit) {
			li_radixtree_insert(ctx->pool.con_ip, addr, bits, GINT_TO_POINTER(cons+1));
		} else {
			limit_reached = TRUE;
			VR_DEBUG(vr, "limit.con_ip: limit reached (%d active connections)", ctx->limit);
		}
		g_mutex_unlock(ctx->mutex);
		break;
	case ML_TYPE_REQ:
		g_mutex_lock(ctx->mutex);
		if (li_cur_ts(vr->wrk) - ctx->pool.req.ts > 1.0) {
			/* reset pool */
			ctx->pool.req.ts = li_cur_ts(vr->wrk);
			ctx->pool.req.num = 1;
		} else {
			ctx->pool.req.num++;
			if (ctx->pool.req.num > ctx->limit) {
				limit_reached = TRUE;
				VR_DEBUG(vr, "limit.req: limit reached (%d req/s)", ctx->limit);
			}
		}
		g_mutex_unlock(ctx->mutex);
		break;
	case ML_TYPE_REQ_IP:
		g_mutex_lock(ctx->mutex);
		rid = li_radixtree_lookup_exact(ctx->pool.req_ip, addr, bits);
		if (!rid) {
			/* IP not known */
			rid = g_slice_new0(mod_limit_req_ip_data);
			rid->requests = 1;
			rid->ip = li_sockaddr_dup(*remote_addr);
			rid->ctx = ctx;
			rid->timeout_elem.data = rid;
			li_radixtree_insert(ctx->pool.req_ip, addr, bits, rid);
			li_waitqueue_push(&(((mod_limit_data*)ctx->plugin->data)->timeout_queues[vr->wrk->ndx]), &rid->timeout_elem);
		} else if (rid->requests < ctx->limit) {
			rid->requests++;
		} else {
			limit_reached = TRUE;
			VR_DEBUG(vr, "limit.req_ip: limit reached (%d req/s)", ctx->limit);
		}
		g_mutex_unlock(ctx->mutex);
		break;
	}

	if (limit_reached) {
		/* limit reached, we either execute the defined action or return a 503 error page */
		if (ctx->action_limit_reached) {
			/* execute action */
			li_action_enter(vr, ctx->action_limit_reached);
		} else {
			/* return 503 error page */
			if (!li_vrequest_handle_direct(vr)) {
				return LI_HANDLER_ERROR;
			}
			vr->response.http_status = 503;
		}
	} else {
		g_ptr_array_add(arr, ctx);
		g_atomic_int_inc(&ctx->refcount);
	}

	return LI_HANDLER_GO_ON;
}
static GRealThreadPool*
g_thread_pool_wait_for_new_pool (void)
{
  GRealThreadPool *pool;
  gint local_wakeup_thread_serial;
  guint local_max_unused_threads;
  gint local_max_idle_time;
  gint last_wakeup_thread_serial;
  gboolean have_relayed_thread_marker = FALSE;

  local_max_unused_threads = g_atomic_int_get (&max_unused_threads);
  local_max_idle_time = g_atomic_int_get (&max_idle_time);
  last_wakeup_thread_serial = g_atomic_int_get (&wakeup_thread_serial);

  g_atomic_int_inc (&unused_threads);

  do
    {
      if (g_atomic_int_get (&unused_threads) >= local_max_unused_threads)
        {
          /* If this is a superfluous thread, stop it. */
          pool = NULL;
        }
      else if (local_max_idle_time > 0)
        {
          /* If a maximal idle time is given, wait for the given time. */
          GTimeVal end_time;

          g_get_current_time (&end_time);
          g_time_val_add (&end_time, local_max_idle_time * 1000);

          DEBUG_MSG (("thread %p waiting in global pool for %f seconds.",
                      g_thread_self (), local_max_idle_time / 1000.0));

          pool = g_async_queue_timed_pop (unused_thread_queue, &end_time);
        }
      else
        {
          /* If no maximal idle time is given, wait indefinitely. */
          DEBUG_MSG (("thread %p waiting in global pool.", g_thread_self ()));
          pool = g_async_queue_pop (unused_thread_queue);
        }

      if (pool == wakeup_thread_marker)
        {
          local_wakeup_thread_serial = g_atomic_int_get (&wakeup_thread_serial);
          if (last_wakeup_thread_serial == local_wakeup_thread_serial)
            {
              if (!have_relayed_thread_marker)
                {
                  /* If this wakeup marker has been received for
                   * the second time, relay it.
                   */
                  DEBUG_MSG (("thread %p relaying wakeup message to "
                              "waiting thread with lower serial.",
                              g_thread_self ()));

                  g_async_queue_push (unused_thread_queue,
                                      wakeup_thread_marker);
                  have_relayed_thread_marker = TRUE;

                  /* If a wakeup marker has been relayed, this thread
                   * will get out of the way for 100 microseconds to
                   * avoid receiving this marker again.
                   */
                  g_usleep (100);
                }
            }
          else
            {
              if (g_atomic_int_exchange_and_add (&kill_unused_threads, -1) > 0)
                {
                  pool = NULL;
                  break;
                }

              DEBUG_MSG (("thread %p updating to new limits.",
                          g_thread_self ()));

              local_max_unused_threads = g_atomic_int_get (&max_unused_threads);
              local_max_idle_time = g_atomic_int_get (&max_idle_time);
              last_wakeup_thread_serial = local_wakeup_thread_serial;

              have_relayed_thread_marker = FALSE;
            }
        }
    }
  while (pool == wakeup_thread_marker);

  g_atomic_int_add (&unused_threads, -1);

  return pool;
}