static guint get_next_packet_rtp_payload_size(guint stream_id, GstScreamQueue *self) {
    GstScreamDataQueueRtpItem *item;
    GstScreamStream *stream;
    guint size = 0;

    g_rw_lock_reader_lock(&self->lock);
    stream = g_hash_table_lookup(self->streams, GUINT_TO_POINTER(stream_id));
    g_rw_lock_reader_unlock(&self->lock);

    if ((item = gst_atomic_queue_peek(stream->packet_queue))) {
        size = item->rtp_payload_size;
    }
    return size;
}
int drew_loader_get_type(DrewLoader *ldr, int id) {
    if (!ldr)
        return -DREW_ERR_INVALID;

    g_rw_lock_reader_lock(&ldr->lock);
    int retval = is_valid_id(ldr, id, 0) ? ldr->plugin[id].type : -DREW_ERR_INVALID;
    g_rw_lock_reader_unlock(&ldr->lock);
    return retval;
}
static void dupin_attachment_db_p_update_real (DupinAttachmentDBP * p, DupinAttachmentDB * attachment_db) {
  g_rw_lock_reader_lock (attachment_db->rwlock);
  gboolean todelete = attachment_db->todelete;
  g_rw_lock_reader_unlock (attachment_db->rwlock);

  if (todelete == TRUE) {
    if (p->attachment_dbs != NULL) {
      /* NOTE - need to remove the pointer from the parent if the attachment db is "hot deleted" */
      DupinAttachmentDB ** attachment_dbs = g_malloc (sizeof (DupinAttachmentDB *) * p->size);
      gint i;
      gint current_numb = p->numb;
      p->numb = 0;
      for (i = 0; i < current_numb; i++) {
        if (p->attachment_dbs[i] != attachment_db) {
          attachment_dbs[p->numb] = p->attachment_dbs[i];
          p->numb++;
        }
      }
      g_free (p->attachment_dbs);
      p->attachment_dbs = attachment_dbs;
    }
    return;
  }

  if (p->attachment_dbs == NULL) {
    p->attachment_dbs = g_malloc (sizeof (DupinAttachmentDB *) * DUPIN_ATTACHMENT_DB_P_SIZE);
    p->size = DUPIN_ATTACHMENT_DB_P_SIZE;
  } else if (p->numb == p->size) {
    p->size += DUPIN_ATTACHMENT_DB_P_SIZE;
    p->attachment_dbs = g_realloc (p->attachment_dbs, sizeof (DupinAttachmentDB *) * p->size);
  }

  p->attachment_dbs[p->numb] = attachment_db;
  p->numb++;
}
static void _schedulerpolicyhoststeal_push(SchedulerPolicy* policy, Event* event, Host* srcHost, Host* dstHost, SimulationTime barrier) {
    MAGIC_ASSERT(policy);
    HostStealPolicyData* data = policy->data;

    /* Non-local events must be properly delayed so the event won't show up at another host
     * before the next scheduling interval. If the thread scheduler guaranteed to always run
     * the minimum-time event across all of its assigned hosts, then we would only need to
     * do the time adjustment if the srcThread and dstThread are not identical. However,
     * the logic of this policy allows a thread to run all events from a given host before
     * moving on to the next host, so we must adjust the time whenever the srcHost and
     * dstHost are not the same. */
    SimulationTime eventTime = event_getTime(event);
    if(srcHost != dstHost && eventTime < barrier) {
        event_setTime(event, barrier);
        info("Inter-host event time %"G_GUINT64_FORMAT" changed to %"G_GUINT64_FORMAT" "
             "to ensure event causality", eventTime, barrier);
    }

    g_rw_lock_reader_lock(&data->lock);
    /* we want to track how long this thread spends idle waiting to push the event */
    HostStealThreadData* tdata = g_hash_table_lookup(data->threadToThreadDataMap, GUINT_TO_POINTER(pthread_self()));
    /* get the queue for the destination */
    HostStealQueueData* qdata = g_hash_table_lookup(data->hostToQueueDataMap, dstHost);
    g_rw_lock_reader_unlock(&data->lock);
    utility_assert(qdata);

    /* tracking idle time spent waiting for the destination queue lock */
    if(tdata) {
        g_timer_continue(tdata->pushIdleTime);
        g_mutex_lock(&(tdata->lock));
    }
    g_mutex_lock(&(qdata->lock));
    if(tdata) {
        g_timer_stop(tdata->pushIdleTime);
    }

    /* 'deliver' the event to the destination queue */
    priorityqueue_push(qdata->pq, event);
    qdata->nPushed++;

    /* release the destination queue lock */
    g_mutex_unlock(&(qdata->lock));
    if(tdata) {
        g_mutex_unlock(&(tdata->lock));
    }
}
static void
test_rwlock6 (void)
{
  static GRWLock lock;
  gboolean ret;

  g_rw_lock_writer_lock (&lock);
  ret = g_rw_lock_reader_trylock (&lock);
  g_assert (!ret);
  g_rw_lock_writer_unlock (&lock);

  g_rw_lock_reader_lock (&lock);
  ret = g_rw_lock_writer_trylock (&lock);
  g_assert (!ret);
  g_rw_lock_reader_unlock (&lock);
}
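/* A minimal sketch (added for illustration, not part of the test suite above) of the
 * reader/writer pattern that test_rwlock6 exercises, using a hypothetical shared
 * counter: many readers may hold the lock concurrently, while a writer excludes
 * both readers and other writers. */
#include <glib.h>

static GRWLock counter_lock;
static gint counter_value;

static gint
counter_get (void)
{
  /* readers may hold the lock concurrently */
  g_rw_lock_reader_lock (&counter_lock);
  gint v = counter_value;
  g_rw_lock_reader_unlock (&counter_lock);
  return v;
}

static void
counter_set (gint v)
{
  /* a writer excludes readers and other writers */
  g_rw_lock_writer_lock (&counter_lock);
  counter_value = v;
  g_rw_lock_writer_unlock (&counter_lock);
}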
static gboolean
gum_jsc_script_backend_is_ignoring (GumScriptBackend * backend, GumThreadId thread_id)
{
  GumJscScriptBackend * self = GUM_JSC_SCRIPT_BACKEND (backend);
  GumJscScriptBackendPrivate * priv = self->priv;
  gboolean is_ignored;

  g_rw_lock_reader_lock (&priv->ignored_lock);

  is_ignored = priv->ignored_threads != NULL && g_hash_table_contains (
      priv->ignored_threads, GSIZE_TO_POINTER (thread_id));

  g_rw_lock_reader_unlock (&priv->ignored_lock);

  return is_ignored;
}
/**
 * gst_allocator_find:
 * @name: (allow-none): the name of the allocator
 *
 * Find a previously registered allocator with @name. When @name is NULL, the
 * default allocator will be returned.
 *
 * Returns: (transfer full): a #GstAllocator or NULL when the allocator with @name was not
 * registered. Use gst_object_unref() to release the allocator after usage.
 */
GstAllocator *
gst_allocator_find (const gchar * name)
{
  GstAllocator *allocator;

  g_rw_lock_reader_lock (&lock);
  if (name) {
    allocator = g_hash_table_lookup (allocators, (gconstpointer) name);
  } else {
    allocator = _default_allocator;
  }
  if (allocator)
    gst_object_ref (allocator);
  g_rw_lock_reader_unlock (&lock);

  return allocator;
}
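/* A minimal usage sketch (not part of GStreamer itself, assumes gst_init() has
 * already been called): look up the default allocator, allocate and free a block,
 * and drop the reference taken by gst_allocator_find(). */
#include <gst/gst.h>

static void
allocator_find_example (void)
{
  GstAllocator *alloc = gst_allocator_find (NULL);  /* NULL means the default allocator */
  if (alloc != NULL) {
    GstMemory *mem = gst_allocator_alloc (alloc, 1024, NULL);
    gst_memory_unref (mem);
    gst_object_unref (alloc);  /* release the ref added by gst_allocator_find() */
  }
}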
static void approve_transmit_cb(guint stream_id, GstScreamQueue *self) {
    GstScreamDataQueueRtpItem *item;
    GstScreamStream *stream;

    g_rw_lock_reader_lock(&self->lock);
    stream = g_hash_table_lookup(self->streams, GUINT_TO_POINTER(stream_id));
    g_rw_lock_reader_unlock(&self->lock);

    if ((item = gst_atomic_queue_pop(stream->packet_queue))) {
        stream->enqueued_payload_size -= item->rtp_payload_size;
        stream->enqueued_packets--;
        GST_LOG_OBJECT(self, "approving: pt = %u, seq: %u, pass: %u",
            item->rtp_pt, item->rtp_seq, self->pass_through);
        gst_data_queue_push(self->approved_packets, (GstDataQueueItem *)item);
    } else {
        GST_LOG_OBJECT(self, "Got approve callback on an empty queue, or flushing");
    }
}
/**
 * g_resources_get_info:
 * @path: A pathname inside the resource
 * @lookup_flags: A #GResourceLookupFlags
 * @size: (out) (allow-none): a location to place the length of the contents of the file,
 *    or %NULL if the length is not needed
 * @flags: (out) (allow-none): a location to place the flags about the file,
 *    or %NULL if the flags are not needed
 * @error: return location for a #GError, or %NULL
 *
 * Looks for a file at the specified @path in the set of
 * globally registered resources and if found returns information about it.
 *
 * @lookup_flags controls the behaviour of the lookup.
 *
 * Returns: %TRUE if the file was found, %FALSE if there were errors
 *
 * Since: 2.32
 **/
gboolean
g_resources_get_info (const gchar          *path,
                      GResourceLookupFlags  lookup_flags,
                      gsize                *size,
                      guint32              *flags,
                      GError              **error)
{
  gboolean res = FALSE;
  GList *l;
  gboolean r_res;

  register_lazy_static_resources ();

  g_rw_lock_reader_lock (&resources_lock);

  for (l = registered_resources; l != NULL; l = l->next)
    {
      GResource *r = l->data;
      GError *my_error = NULL;

      r_res = g_resource_get_info (r, path, lookup_flags, size, flags, &my_error);
      if (!r_res &&
          g_error_matches (my_error, G_RESOURCE_ERROR, G_RESOURCE_ERROR_NOT_FOUND))
        {
          g_clear_error (&my_error);
        }
      else
        {
          if (!r_res)
            g_propagate_error (error, my_error);
          res = r_res;
          break;
        }
    }

  if (l == NULL)
    g_set_error (error, G_RESOURCE_ERROR, G_RESOURCE_ERROR_NOT_FOUND,
                 _("The resource at '%s' does not exist"),
                 path);

  g_rw_lock_reader_unlock (&resources_lock);

  return res;
}
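/* A minimal usage sketch (hypothetical resource path): query the size and flags of an
 * embedded file without mapping its data. Assumes the resource bundle was registered,
 * e.g. by linking in a glib-compile-resources generated source file. */
#include <gio/gio.h>

static void
resource_info_example (void)
{
  gsize size = 0;
  guint32 flags = 0;
  GError *error = NULL;

  if (g_resources_get_info ("/org/example/app/data.txt", G_RESOURCE_LOOKUP_FLAGS_NONE,
                            &size, &flags, &error))
    {
      g_print ("resource is %" G_GSIZE_FORMAT " bytes, flags 0x%x\n", size, flags);
    }
  else
    {
      g_warning ("lookup failed: %s", error->message);
      g_clear_error (&error);
    }
}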
int drew_loader_get_algo_name(DrewLoader *ldr, int id, const char **namep) {
    if (!ldr)
        return -DREW_ERR_INVALID;

    g_rw_lock_reader_lock(&ldr->lock);
    int retval = 0;
    if (!is_valid_id(ldr, id, 0))
        retval = -DREW_ERR_INVALID;
    else
        *namep = ldr->plugin[id].name;
    g_rw_lock_reader_unlock(&ldr->lock);
    return retval;
}
static GstScreamStream * get_stream(GstScreamQueue *self, guint ssrc, guint pt) {
    GstScreamStream *stream = NULL;
    gboolean adapt_stream = FALSE;
    guint stream_id = ssrc;

    if (G_LIKELY(g_hash_table_contains(self->adapted_stream_ids, GUINT_TO_POINTER(stream_id)))) {
        g_rw_lock_reader_lock(&self->lock);
        stream = g_hash_table_lookup(self->streams, GUINT_TO_POINTER(stream_id));
        g_rw_lock_reader_unlock(&self->lock);
    } else if (g_hash_table_contains(self->ignored_stream_ids, GUINT_TO_POINTER(stream_id))) {
        /* DO NOTHING */
    } else {
        g_signal_emit_by_name(self, "on-payload-adaptation-request", pt, &adapt_stream);
        if (!adapt_stream) {
            GST_DEBUG_OBJECT(self, "Ignoring adaptation for payload %u for ssrc %u", pt, stream_id);
            g_hash_table_add(self->ignored_stream_ids, GUINT_TO_POINTER(stream_id));
        } else {
            if (gst_scream_controller_register_new_stream(self->scream_controller,
                    stream_id, self->priority, SCREAM_MIN_BITRATE, SCREAM_MAX_BITRATE,
                    (GstScreamQueueBitrateRequestedCb)on_bitrate_change,
                    (GstScreamQueueNextPacketSizeCb)get_next_packet_rtp_payload_size,
                    (GstScreamQueueApproveTransmitCb)approve_transmit_cb,
                    (GstScreamQueueClearQueueCb)clear_queue,
                    (gpointer)self)) {

                stream = g_new0(GstScreamStream, 1);
                stream->ssrc = ssrc;
                stream->pt = pt;
                stream->packet_queue = gst_atomic_queue_new(0);
                stream->enqueued_payload_size = 0;
                stream->enqueued_packets = 0;

                g_rw_lock_writer_lock(&self->lock);
                g_hash_table_insert(self->streams, GUINT_TO_POINTER(stream_id), stream);
                g_rw_lock_writer_unlock(&self->lock);

                g_hash_table_add(self->adapted_stream_ids, GUINT_TO_POINTER(stream_id));
            } else {
                GST_WARNING_OBJECT(self, "Failed to register new stream\n");
            }
        }
    }
    return stream;
}
/**
 * g_resources_lookup_data:
 * @path: A pathname inside the resource
 * @lookup_flags: A #GResourceLookupFlags
 * @error: return location for a #GError, or %NULL
 *
 * Looks for a file at the specified @path in the set of
 * globally registered resources and returns a #GBytes that
 * lets you directly access the data in memory.
 *
 * The data is always followed by a zero byte, so you
 * can safely use the data as a C string. However, that byte
 * is not included in the size of the GBytes.
 *
 * For uncompressed resource files this is a pointer directly into
 * the resource bundle, which is typically in some read-only data section
 * in the program binary. For compressed files we allocate memory on
 * the heap and automatically uncompress the data.
 *
 * @lookup_flags controls the behaviour of the lookup.
 *
 * Returns: (transfer full): #GBytes or %NULL on error.
 *     Free the returned object with g_bytes_unref()
 *
 * Since: 2.32
 **/
GBytes *
g_resources_lookup_data (const gchar          *path,
                         GResourceLookupFlags  lookup_flags,
                         GError              **error)
{
  GBytes *res = NULL;
  GList *l;
  GBytes *data;

  register_lazy_static_resources ();

  g_rw_lock_reader_lock (&resources_lock);

  for (l = registered_resources; l != NULL; l = l->next)
    {
      GResource *r = l->data;
      GError *my_error = NULL;

      data = g_resource_lookup_data (r, path, lookup_flags, &my_error);
      if (data == NULL &&
          g_error_matches (my_error, G_RESOURCE_ERROR, G_RESOURCE_ERROR_NOT_FOUND))
        {
          g_clear_error (&my_error);
        }
      else
        {
          if (data == NULL)
            g_propagate_error (error, my_error);
          res = data;
          break;
        }
    }

  if (l == NULL)
    g_set_error (error, G_RESOURCE_ERROR, G_RESOURCE_ERROR_NOT_FOUND,
                 _("The resource at '%s' does not exist"),
                 path);

  g_rw_lock_reader_unlock (&resources_lock);

  return res;
}
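/* A minimal usage sketch (hypothetical resource path): fetch embedded data and use it
 * as a NUL-terminated string, as the documentation above permits. */
#include <gio/gio.h>

static void
resource_data_example (void)
{
  GError *error = NULL;
  GBytes *bytes = g_resources_lookup_data ("/org/example/app/greeting.txt",
                                           G_RESOURCE_LOOKUP_FLAGS_NONE, &error);
  if (bytes != NULL)
    {
      const gchar *text = g_bytes_get_data (bytes, NULL);
      g_print ("%s\n", text);
      g_bytes_unref (bytes);
    }
  else
    {
      g_warning ("lookup failed: %s", error->message);
      g_clear_error (&error);
    }
}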
static void _shadowtorpreload_cryptoLockingFunc(int mode, int n, const char *file, int line) {
    assert(shadowtorpreloadGlobalState.initialized);

    GRWLock* lock = &(shadowtorpreloadGlobalState.cryptoThreadLocks[n]);
    assert(lock);

    if(mode & CRYPTO_LOCK) {
        if(mode & CRYPTO_READ) {
            g_rw_lock_reader_lock(lock);
        } else if(mode & CRYPTO_WRITE) {
            g_rw_lock_writer_lock(lock);
        }
    } else if(mode & CRYPTO_UNLOCK) {
        if(mode & CRYPTO_READ) {
            g_rw_lock_reader_unlock(lock);
        } else if(mode & CRYPTO_WRITE) {
            g_rw_lock_writer_unlock(lock);
        }
    }
}
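/* A minimal sketch, assuming the pre-1.1.0 OpenSSL locking-callback API and the field
 * names visible in the callback above; this hypothetical init helper is not part of the
 * original preload library. CRYPTO_num_locks() reports how many locks OpenSSL expects,
 * and CRYPTO_set_locking_callback() installs the callback defined above. */
#include <glib.h>
#include <openssl/crypto.h>

static void shadowtorpreload_initCryptoLocks_example(void) {
    /* allocate and initialize one GRWLock per lock slot that OpenSSL will index by n */
    int nLocks = CRYPTO_num_locks();
    shadowtorpreloadGlobalState.cryptoThreadLocks = g_new0(GRWLock, nLocks);
    for (int i = 0; i < nLocks; i++) {
        g_rw_lock_init(&shadowtorpreloadGlobalState.cryptoThreadLocks[i]);
    }
    shadowtorpreloadGlobalState.initialized = 1;

    /* hand the callback to OpenSSL */
    CRYPTO_set_locking_callback(_shadowtorpreload_cryptoLockingFunc);
}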
gint get_conn_pending_count(network_connection_pool *pool, const gchar *username) {
    pool_status *pool_st = NULL;
    gint ret = 0;

    g_assert(pool);
    g_assert(username);

    g_rw_lock_reader_lock(&pool->pool_status_lock);
    if (pool->conn_pool_status) {
        pool_st = g_hash_table_lookup(pool->conn_pool_status, username);
        if (pool_st) {
            g_mutex_lock(&pool_st->status_mutex);
            ret = pool_st->conn_num_in_pending;
            g_mutex_unlock(&pool_st->status_mutex);
        }
    }
    g_rw_lock_reader_unlock(&pool->pool_status_lock);

    return ret;
}
GError* meta0_backend_get_all_meta1_ref(struct meta0_backend_s *m0, GPtrArray **result) {
    GError *err;

    EXTRA_ASSERT(m0 != NULL);
    EXTRA_ASSERT(result != NULL);

    if (NULL != (err = _reload(m0, TRUE))) {
        g_prefix_error(&err, "Reload error: ");
        return err;
    }

    g_rw_lock_reader_lock(&(m0->rwlock));
    EXTRA_ASSERT(m0->array_meta1_ref != NULL);
    *result = meta0_utils_array_meta1ref_dup(m0->array_meta1_ref);
    g_rw_lock_reader_unlock(&(m0->rwlock));

    return NULL;
}
gint get_conn_using_pending_count(network_connection_pool *pool, const gchar *username) {
    pool_status *pool_st = NULL;
    gint ret = 0;

    g_assert(pool);
    g_assert(username);

    g_rw_lock_reader_lock(&(pool->pool_status_lock));
    if (pool->conn_pool_status != NULL) {
        pool_st = g_hash_table_lookup(pool->conn_pool_status, username);
        if (pool_st == NULL) {
            ret = 0;
        } else {
            ret = pool_st->conn_num_in_pending + pool_st->conn_num_in_use;
        }
    }
    g_rw_lock_reader_unlock(&(pool->pool_status_lock));

    return ret;
}
static SimulationTime _schedulerpolicyhoststeal_getNextTime(SchedulerPolicy* policy) {
    MAGIC_ASSERT(policy);
    HostStealPolicyData* data = policy->data;

    /* set up state that we need for the foreach queue iterator */
    HostStealSearchState searchState;
    memset(&searchState, 0, sizeof(HostStealSearchState));
    searchState.data = data;
    searchState.nextEventTime = SIMTIME_MAX;

    g_rw_lock_reader_lock(&data->lock);
    HostStealThreadData* tdata = g_hash_table_lookup(data->threadToThreadDataMap, GUINT_TO_POINTER(pthread_self()));
    g_rw_lock_reader_unlock(&data->lock);

    if(tdata) {
        /* make sure we get all hosts, which are probably held in the processedHosts queue between rounds */
        g_queue_foreach(tdata->unprocessedHosts, (GFunc)_schedulerpolicyhoststeal_findMinTime, &searchState);
        g_queue_foreach(tdata->processedHosts, (GFunc)_schedulerpolicyhoststeal_findMinTime, &searchState);
    }

    info("next event at time %"G_GUINT64_FORMAT, searchState.nextEventTime);
    return searchState.nextEventTime;
}
static GQueue* _schedulerpolicyhoststeal_getHosts(SchedulerPolicy* policy) {
    MAGIC_ASSERT(policy);
    HostStealPolicyData* data = policy->data;

    g_rw_lock_reader_lock(&data->lock);
    HostStealThreadData* tdata = g_hash_table_lookup(data->threadToThreadDataMap, GUINT_TO_POINTER(pthread_self()));
    g_rw_lock_reader_unlock(&data->lock);

    if(!tdata) {
        return NULL;
    }
    if(g_queue_is_empty(tdata->unprocessedHosts)) {
        return tdata->processedHosts;
    }
    if(g_queue_is_empty(tdata->processedHosts)) {
        return tdata->unprocessedHosts;
    }

    if(tdata->allHosts) {
        g_queue_free(tdata->allHosts);
    }
    tdata->allHosts = g_queue_copy(tdata->processedHosts);
    g_queue_foreach(tdata->unprocessedHosts, (GFunc)concat_queue_iter, tdata->allHosts);
    return tdata->allHosts;
}
int drew_loader_get_functbl(DrewLoader *ldr, int id, const void **tbl) {
    if (!ldr)
        return -DREW_ERR_INVALID;

    g_rw_lock_reader_lock(&ldr->lock);

    int retval;
    if (!is_valid_id(ldr, id, 0)) {
        retval = -DREW_ERR_INVALID;
        goto out;
    }

    if (tbl)
        *tbl = ldr->plugin[id].functbl;
    retval = ldr->plugin[id].functblsize;

out:
    g_rw_lock_reader_unlock(&ldr->lock);
    return retval;
}
/**
 * Delete the matching rule from the db-sql-rule table.
 * @param table the db-sql-rule table
 * @param dbname the database whose rule should be deleted
 * @param normalized_sql the corresponding normalized SQL
 * @return TRUE if the deletion succeeded, FALSE otherwise
 */
gboolean delete_rule_from_db_sql_rule(db_sql_rule_table *table, const char *dbname, const char *normalized_sql) {
    if (NULL == table || NULL == dbname || NULL == normalized_sql) {
        return FALSE;
    }

    gboolean ret = FALSE;
    GString *db_key = g_string_new(dbname);
    sql_rule_table *sql_rule_table_v = NULL;

    g_rw_lock_reader_lock(&table->table_lock);
    sql_rule_table_v = g_hash_table_lookup(table->db_sql_rule, db_key);
    g_rw_lock_reader_unlock(&table->table_lock);

    if (sql_rule_table_v) {
        ret = delete_rule_from_sql_rule(sql_rule_table_v, normalized_sql);
    }

    g_string_free(db_key, TRUE);
    return ret;
}
/* This function queries the number of plugins in the library which contains the
 * plugin with ID id. As a special case, if id is -1, return the total number
 * of plugins loaded.
 */
int drew_loader_get_nplugins(DrewLoader *ldr, int id) {
    int retval = 0;

    if (!ldr)
        return -DREW_ERR_INVALID;

    g_rw_lock_reader_lock(&ldr->lock);

    if (id == -1) {
        retval = ldr->nplugins;
        goto out;
    }

    if (!is_valid_id(ldr, id, 0)) {
        retval = is_valid_id(ldr, id, 1) ? 0 : -DREW_ERR_INVALID;
        goto out;
    }

    retval = ldr->plugin[id].lib->nplugins;

out:
    g_rw_lock_reader_unlock(&ldr->lock);
    return retval;
}
/**
 * Look up the action of the matching rule in the user-db-sql-rule table.
 * @param table the db-sql-rule table
 * @param user the user name the rule applies to
 * @param dbname the database name the rule applies to
 * @param normalized_sql the normalized SQL
 * @param type the rule type
 * @param exist [out] set to 1 if the rule exists, 0 if it does not
 * @return the action of the queried rule
 */
security_action get_action_from_user_db_sql_rule(user_db_sql_rule_table *table, const char *user,
        const char *dbname, const char *normalized_sql, security_model_type type, int *exist) {
    security_action ret = ACTION_SAFE;

    g_assert(IS_CORRECT_TYPE(type));
    g_assert(exist);

    if (NULL == table || NULL == table->user_db_sql_rule[type]) {
        return ret;
    }
    if (NULL == user || NULL == dbname || NULL == normalized_sql) {
        return ret;
    }

    GString *user_key = g_string_new(user);
    db_sql_rule_table* db_sql_rule_table_v = NULL;

    g_rw_lock_reader_lock(&table->table_lock[type]);
    db_sql_rule_table_v = g_hash_table_lookup(table->user_db_sql_rule[type], user_key);
    g_rw_lock_reader_unlock(&table->table_lock[type]);

    if (db_sql_rule_table_v) {
        ret = get_action_from_db_sql_rule(db_sql_rule_table_v, dbname, normalized_sql, exist);
    }

    g_string_free(user_key, TRUE);
    return ret;
}
GError* meta0_backend_get_one(struct meta0_backend_s *m0, const guint8 *prefix, gchar ***u) {
    GError *err;

    EXTRA_ASSERT(m0 != NULL);
    EXTRA_ASSERT(u != NULL);
    GRID_TRACE("%s(%p,%02X%02X,%p)", __FUNCTION__, m0, prefix[0], prefix[1], u);

    if (NULL != (err = _reload(m0, TRUE))) {
        g_prefix_error(&err, "Reload error: ");
        return err;
    }

    g_rw_lock_reader_lock(&(m0->rwlock));
    EXTRA_ASSERT(m0->array_by_prefix != NULL);
    *u = meta0_utils_array_get_urlv(m0->array_by_prefix, prefix);
    g_rw_lock_reader_unlock(&(m0->rwlock));

    return *u ? NULL : NEWERROR(EINVAL, "META0 partially missing");
}
/**
 * Fetch the matching rule from the db-sql-rule table.
 * @param table the db-sql-rule table
 * @param dbname the database name the rule applies to
 * @param normalized_sql the normalized SQL
 * @return the rule indexed by dbname and the normalized SQL, or NULL if not found
 */
sql_security_rule* get_rule_from_db_sql_rule(db_sql_rule_table *table, const char *dbname, const char *normalized_sql) {
    if (NULL == table || NULL == dbname || NULL == normalized_sql) {
        return NULL;
    }

    sql_security_rule *rule = NULL;
    GString *db_key = g_string_new(dbname);
    sql_rule_table *sql_rule_table_v = NULL;

    g_rw_lock_reader_lock(&table->table_lock);
    sql_rule_table_v = g_hash_table_lookup(table->db_sql_rule, db_key);
    g_rw_lock_reader_unlock(&table->table_lock);

    if (sql_rule_table_v) {
        rule = get_rule_from_sql_rule(sql_rule_table_v, normalized_sql);
    }

    g_string_free(db_key, TRUE);
    return rule;
}
/* this must be run synchronously, or the thread must be protected by locks */
static void _schedulerpolicyhoststeal_addHost(SchedulerPolicy* policy, Host* host, pthread_t randomThread) {
    MAGIC_ASSERT(policy);
    HostStealPolicyData* data = policy->data;

    /* each host has its own queue
     * we don't read lock data->lock because we only modify the table here anyway */
    if(!g_hash_table_lookup(data->hostToQueueDataMap, host)) {
        g_rw_lock_writer_lock(&data->lock);
        g_hash_table_replace(data->hostToQueueDataMap, host, _hoststealqueuedata_new());
        g_rw_lock_writer_unlock(&data->lock);
    }

    /* each thread keeps track of the hosts it needs to run */
    pthread_t assignedThread = (randomThread != 0) ? randomThread : pthread_self();

    g_rw_lock_reader_lock(&data->lock);
    HostStealThreadData* tdata = g_hash_table_lookup(data->threadToThreadDataMap, GUINT_TO_POINTER(assignedThread));
    g_rw_lock_reader_unlock(&data->lock);

    if(!tdata) {
        tdata = _hoststealthreaddata_new();
        g_rw_lock_writer_lock(&data->lock);
        g_hash_table_replace(data->threadToThreadDataMap, GUINT_TO_POINTER(assignedThread), tdata);
        tdata->tnumber = data->threadCount;
        data->threadCount++;
        g_array_append_val(data->threadList, tdata);
    } else {
        g_rw_lock_writer_lock(&data->lock);
    }

    /* store the host-to-thread mapping */
    g_hash_table_replace(data->hostToThreadMap, host, GUINT_TO_POINTER(assignedThread));
    g_rw_lock_writer_unlock(&data->lock);

    /* if the target thread is stealing the host, we don't want to add it twice */
    if(host != tdata->runningHost) {
        g_queue_push_tail(tdata->unprocessedHosts, host);
    }
}
int drew_loader_lookup_by_type(DrewLoader *ldr, int type, int start, int end) {
    if (!ldr)
        return -DREW_ERR_INVALID;

    g_rw_lock_reader_lock(&ldr->lock);

    if (end == -1)
        end = ldr->nplugins;

    for (int i = start; i < end; i++) {
        if (!is_valid_id(ldr, i, 0))
            continue;
        if (ldr->plugin[i].type == type) {
            g_rw_lock_reader_unlock(&ldr->lock);
            return i;
        }
    }

    g_rw_lock_reader_unlock(&ldr->lock);
    return -DREW_ERR_NONEXISTENT;
}
static Event* _schedulerpolicyhoststeal_pop(SchedulerPolicy* policy, SimulationTime barrier) {
    MAGIC_ASSERT(policy);
    HostStealPolicyData* data = policy->data;

    /* first, we try to pop a host from this thread's queue */
    g_rw_lock_reader_lock(&data->lock);
    HostStealThreadData* tdata = g_hash_table_lookup(data->threadToThreadDataMap, GUINT_TO_POINTER(pthread_self()));
    g_rw_lock_reader_unlock(&data->lock);

    /* if there is no tdata, that means this thread didn't get any hosts assigned to it */
    if(!tdata) {
        /* this thread will remain idle */
        return NULL;
    }

    /* we only need to lock this thread's lock, since it's our own queue */
    g_timer_continue(tdata->popIdleTime);
    g_mutex_lock(&(tdata->lock));
    g_timer_stop(tdata->popIdleTime);

    if(barrier > tdata->currentBarrier) {
        tdata->currentBarrier = barrier;

        /* make sure all of the hosts that were processed last time get processed in the next round */
        if(g_queue_is_empty(tdata->unprocessedHosts) && !g_queue_is_empty(tdata->processedHosts)) {
            GQueue* swap = tdata->unprocessedHosts;
            tdata->unprocessedHosts = tdata->processedHosts;
            tdata->processedHosts = swap;
        } else {
            while(!g_queue_is_empty(tdata->processedHosts)) {
                g_queue_push_tail(tdata->unprocessedHosts, g_queue_pop_head(tdata->processedHosts));
            }
        }
    }

    /* attempt to get an event from this thread's queue */
    Event* nextEvent = _schedulerpolicyhoststeal_popFromThread(policy, tdata, tdata->unprocessedHosts, barrier);
    g_mutex_unlock(&(tdata->lock));
    if(nextEvent != NULL) {
        return nextEvent;
    }

    /* no more hosts with events on this thread, try to steal a host from the other threads' queues */
    GHashTableIter iter;
    gpointer key, value;

    g_rw_lock_reader_lock(&data->lock);
    guint i, n = data->threadCount;
    g_rw_lock_reader_unlock(&data->lock);

    for(i = 1; i < n; i++) {
        guint stolenTnumber = (i + tdata->tnumber) % n;

        g_rw_lock_reader_lock(&data->lock);
        HostStealThreadData* stolenTdata = g_array_index(data->threadList, HostStealThreadData*, stolenTnumber);
        g_rw_lock_reader_unlock(&data->lock);

        /* We don't need a lock here, because we're only reading, and a misread just means either
         * we read as empty when it's not, in which case the assigned thread (or one of the others)
         * will pick it up anyway, or it reads as non-empty when it is empty, in which case we'll
         * just get a NULL event and move on. Accepting this reduces lock contention towards the end
         * of every round. */
        if(g_queue_is_empty(stolenTdata->unprocessedHosts)) {
            continue;
        }

        /* We need to lock the thread we're stealing from, to be sure that we're not stealing
         * something already being stolen, as well as our own lock, to be sure nobody steals
         * what we just stole. But we also need to do this in a well-ordered manner, to
         * prevent deadlocks. To do this, we always lock the lock with the smaller thread
         * number first. */
        g_timer_continue(tdata->popIdleTime);
        if(tdata->tnumber < stolenTnumber) {
            g_mutex_lock(&(tdata->lock));
            g_mutex_lock(&(stolenTdata->lock));
        } else {
            g_mutex_lock(&(stolenTdata->lock));
            g_mutex_lock(&(tdata->lock));
        }
        g_timer_stop(tdata->popIdleTime);

        /* attempt to get an event from the other thread's queue, likely moving a host from its
         * unprocessedHosts into this thread's runningHost (and eventually processedHosts) */
        nextEvent = _schedulerpolicyhoststeal_popFromThread(policy, tdata, stolenTdata->unprocessedHosts, barrier);

        /* must unlock in reverse order of locking */
        if(tdata->tnumber < stolenTnumber) {
            g_mutex_unlock(&(stolenTdata->lock));
            g_mutex_unlock(&(tdata->lock));
        } else {
            g_mutex_unlock(&(tdata->lock));
            g_mutex_unlock(&(stolenTdata->lock));
        }

        if(nextEvent != NULL) {
            break;
        }
    }

    return nextEvent;
}
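/* A minimal standalone sketch (hypothetical helpers, not part of the Shadow scheduler)
 * of the lock-ordering discipline described in the comments above: when two threads may
 * lock each other's mutexes, always acquire the mutex belonging to the smaller thread
 * number first and release in the reverse order, so no circular wait can form. */
#include <glib.h>

static void lock_pair_in_order(GMutex *mine, guint myNumber, GMutex *theirs, guint theirNumber) {
    if (myNumber < theirNumber) {
        g_mutex_lock(mine);
        g_mutex_lock(theirs);
    } else {
        g_mutex_lock(theirs);
        g_mutex_lock(mine);
    }
}

static void unlock_pair_in_order(GMutex *mine, guint myNumber, GMutex *theirs, guint theirNumber) {
    if (myNumber < theirNumber) {
        g_mutex_unlock(theirs);
        g_mutex_unlock(mine);
    } else {
        g_mutex_unlock(mine);
        g_mutex_unlock(theirs);
    }
}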
/**
 * Parse command line options, create connection object,
 * start the connection and finally create database schema
 *
 * @return DBI connection handle
 */
dbi_conn *tagsistant_db_connection(int start_transaction) {
	/* DBI connection handler used by subsequent calls to dbi_* functions */
	dbi_conn dbi = NULL;

	if (start_transaction) {
		g_rw_lock_writer_lock(&(tagsistant_query_rwlock));
	} else {
		g_rw_lock_reader_lock(&(tagsistant_query_rwlock));
	}

	/* lock the pool */
	g_mutex_lock(&tagsistant_connection_pool_lock);

	GList *pool = tagsistant_connection_pool;
	while (pool) {
		dbi = (dbi_conn) pool->data;

		/* check if the connection is still alive */
		if (!dbi_conn_ping(dbi) && dbi_conn_connect(dbi) < 0) {
			dbi_conn_close(dbi);
			tagsistant_connection_pool = g_list_delete_link(tagsistant_connection_pool, pool);
			connections--;
		} else {
			tagsistant_connection_pool = g_list_remove_link(tagsistant_connection_pool, pool);
			g_list_free_1(pool);
			break;
		}

		pool = pool->next;
	}

	/*
	 * unlock the pool mutex only if the backend is not SQLite
	 */
	g_mutex_unlock(&tagsistant_connection_pool_lock);

	if (!dbi) {
		// initialize DBI drivers
		if (TAGSISTANT_DBI_MYSQL_BACKEND == dboptions.backend) {
			if (!tagsistant_driver_is_available("mysql")) {
				fprintf(stderr, "MySQL driver not installed\n");
				dbg('s', LOG_ERR, "MySQL driver not installed");
				exit (1);
			}

			// unlucky, MySQL does not provide INTERSECT operator
			tagsistant.sql_backend_have_intersect = 0;

			// create connection
#if TAGSISTANT_REENTRANT_DBI
			dbi = dbi_conn_new_r("mysql", tagsistant.dbi_instance);
#else
			dbi = dbi_conn_new("mysql");
#endif
			if (NULL == dbi) {
				dbg('s', LOG_ERR, "Error creating MySQL connection");
				exit (1);
			}

			// set connection options
			dbi_conn_set_option(dbi, "host", dboptions.host);
			dbi_conn_set_option(dbi, "dbname", dboptions.db);
			dbi_conn_set_option(dbi, "username", dboptions.username);
			dbi_conn_set_option(dbi, "password", dboptions.password);
			dbi_conn_set_option(dbi, "encoding", "UTF-8");

		} else if (TAGSISTANT_DBI_SQLITE_BACKEND == dboptions.backend) {
			if (!tagsistant_driver_is_available("sqlite3")) {
				fprintf(stderr, "SQLite3 driver not installed\n");
				dbg('s', LOG_ERR, "SQLite3 driver not installed");
				exit(1);
			}

			// create connection
#if TAGSISTANT_REENTRANT_DBI
			dbi = dbi_conn_new_r("sqlite3", tagsistant.dbi_instance);
#else
			dbi = dbi_conn_new("sqlite3");
#endif
			if (NULL == dbi) {
				dbg('s', LOG_ERR, "Error connecting to SQLite3");
				exit (1);
			}

			// set connection options
			dbi_conn_set_option(dbi, "dbname", "tags.sql");
			dbi_conn_set_option(dbi, "sqlite3_dbdir", tagsistant.repository);

		} else {
			dbg('s', LOG_ERR, "No or wrong database family specified!");
			exit (1);
		}

		// try to connect
		if (dbi_conn_connect(dbi) < 0) {
			int error = dbi_conn_error(dbi, NULL);
			dbg('s', LOG_ERR, "Could not connect to DB (error %d). Please check the --db settings", error);
			exit(1);
		}

		connections++;

		dbg('s', LOG_INFO, "SQL connection established");
	}

	/* start a transaction */
	if (start_transaction) {
#if TAGSISTANT_USE_INTERNAL_TRANSACTIONS
		switch (tagsistant.sql_database_driver) {
			case TAGSISTANT_DBI_SQLITE_BACKEND:
				tagsistant_query("begin transaction", dbi, NULL, NULL);
				break;

			case TAGSISTANT_DBI_MYSQL_BACKEND:
				tagsistant_query("start transaction", dbi, NULL, NULL);
				break;
		}
#else
		dbi_conn_transaction_begin(dbi);
#endif
	}

	return(dbi);
}
int load_sql_filter_from_file(sql_filter *cur_filter) {
    GKeyFile *blacklist_config = NULL;
    GError *gerr = NULL;
    gsize length = 0;
    int i = 0;
    gchar **groups = NULL;

    g_assert(cur_filter != NULL);

    g_rw_lock_reader_lock(&cur_filter->sql_filter_lock);
    if (!(blacklist_config = chassis_frontend_open_config_file(cur_filter->blacklist_file, &gerr))) {
        g_debug("[filter][load from file][failed][%s]", gerr->message);
        g_error_free(gerr);
        gerr = NULL;
        g_rw_lock_reader_unlock(&cur_filter->sql_filter_lock);
        return 1;
    }
    g_rw_lock_reader_unlock(&cur_filter->sql_filter_lock);

    groups = g_key_file_get_groups(blacklist_config, &length);
    for (i = 0; groups[i] != NULL; i++) {
        gchar *sql_rewrite_md5 = NULL;
        int is_enabled = 0;
        int filter_status = 0;

        gchar *filter = g_key_file_get_value(blacklist_config, groups[i], "filter", &gerr);
        if (gerr != NULL) {
            goto next;
        }

        is_enabled = g_key_file_get_integer(blacklist_config, groups[i], "is_enabled", &gerr);
        if (gerr != NULL) {
            goto next;
        }

        filter_status = g_key_file_get_integer(blacklist_config, groups[i], "filter_status", &gerr);
        if (gerr != NULL) {
            goto next;
        }

        sql_rewrite_md5 = g_compute_checksum_for_string(G_CHECKSUM_MD5, C_S(filter));

        g_rw_lock_writer_lock(&cur_filter->sql_filter_lock);
        sql_filter_hval *hval = sql_filter_lookup(cur_filter, sql_rewrite_md5);
        if (hval != NULL) {
            hval->flag = is_enabled;
            hval->filter_status = filter_status;
        } else {
            sql_filter_insert(cur_filter, filter, sql_rewrite_md5, is_enabled, filter_status);
        }
        g_rw_lock_writer_unlock(&cur_filter->sql_filter_lock);

next:
        if (filter != NULL) {
            g_free(filter);
        }
        if (sql_rewrite_md5 != NULL) {
            g_free(sql_rewrite_md5);
        }
        if (gerr != NULL) {
            g_debug("[filter][load from file][failed][%s]", gerr->message);
            g_error_free(gerr);
            gerr = NULL;
        } else {
            g_message("[filter][load from file %s][success]", cur_filter->blacklist_file);
        }
    }

    g_strfreev(groups);
    g_key_file_free(blacklist_config);
    return 0;
}
/**
 * Update the connection pool statistics.
 */
void update_conn_pool_status_in_state(network_connection_pool *pool, const gchar *username, pool_status_state state) {
    pool_status *pool_st = NULL;

    g_assert(pool);
    g_assert(username);

    g_rw_lock_reader_lock(&(pool->pool_status_lock));
    pool_st = g_hash_table_lookup(pool->conn_pool_status, username);
    if (pool_st == NULL) {
        g_rw_lock_reader_unlock(&(pool->pool_status_lock));

        /** @note special case right after backend connection initialization: a new status entry may need to be created */
        if (state == POOL_STATUS_STATE_INITIALIZED) {
            g_rw_lock_writer_lock(&(pool->pool_status_lock));
            pool_st = g_hash_table_lookup(pool->conn_pool_status, username);
            if (pool_st == NULL) {
                gchar *user = NULL;
                pool_st = pool_status_new();
                if (pool_st == NULL) {
                    g_error("[%s]: create pool status for user failed, %s", G_STRLOC, username);
                    g_rw_lock_writer_unlock(&(pool->pool_status_lock));
                    return;
                }
                user = g_strdup(username);
                g_hash_table_insert(pool->conn_pool_status, user, pool_st);
            }
            g_mutex_lock(&pool_st->status_mutex);
            pool_st->conn_num_in_pending++;
            g_mutex_unlock(&pool_st->status_mutex);
            g_rw_lock_writer_unlock(&(pool->pool_status_lock));
        }
        return;
    }

    g_mutex_lock(&pool_st->status_mutex);
    switch (state) {
    /** backend connection being initialized */
    case POOL_STATUS_STATE_INITIALIZED:
        pool_st->conn_num_in_pending++;
        break;
    /** connection put into the pool (backend connection initialized successfully) */
    case POOL_STATUS_STATE_PUT_INTO_POOL:
        pool_st->conn_num_in_idle++;
        /**@note Fall through*/
    /** connection not established (backend connection initialization failed) */
    case POOL_STATUS_STATE_NOT_CONNECTED:
        if (pool_st->conn_num_in_pending > 0) {
            pool_st->conn_num_in_pending--;
        }
        break;
    /** connection taken from the pool */
    case POOL_STATUS_STATE_GET_FROM_POOL:
        if (pool_st->conn_num_in_idle > 0) {
            pool_st->conn_num_in_idle--;
        }
        pool_st->conn_num_in_use++;
        break;
    /** connection returned to the pool (normal case) */
    case POOL_STATUS_STATE_RETURN_TO_POOL:
        pool_st->conn_num_in_idle++;
        /**@note Fall through*/
    /** connection disconnected (error case) */
    case POOL_STATUS_STATE_DISCONNECTED:
        if (pool_st->conn_num_in_use > 0) {
            pool_st->conn_num_in_use--;
        }
        break;
    case POOL_STATUS_STATE_REMOVE_FROM_POOL:
        pool_st->conn_num_in_idle--;
        break;
    default:
        g_assert_not_reached();
        break;
    }
    g_mutex_unlock(&pool_st->status_mutex);
    g_rw_lock_reader_unlock(&(pool->pool_status_lock));

    return;
}