/* Timer-thread callback: flushes as many queued packets as the kernel will
 * accept right now, and reschedules itself for the first packet that could
 * not be sent yet.
 *
 * ptr is a struct send_timer *. Locking order is call->master_lock (read)
 * then st->lock; the reschedule happens only after both are released. */
static void send_timer_run(void *ptr) {
	struct send_timer *st = ptr;
	struct call *call = st->call;

	log_info_call(call);
	ilog(LOG_DEBUG, "running scheduled send_timer");

	/* zero next_send doubles as "nothing left to schedule" sentinel below */
	struct timeval next_send = {0,};

	rwlock_lock_r(&call->master_lock);
	mutex_lock(&st->lock);

	/* drain the queue head-first; stop at the first packet that isn't
	 * due/sendable yet */
	while (st->packets.length) {
		struct codec_packet *cp = st->packets.head->data;

		// XXX this could be made lock-free

		/* zero return means the packet was sent (or consumed):
		 * drop it from the queue and try the next one */
		if (!send_timer_send(st, cp)) {
			g_queue_pop_head(&st->packets);
			continue;
		}

		// couldn't send the last one. remember time to schedule
		next_send = cp->to_send;
		break;
	}

	mutex_unlock(&st->lock);
	rwlock_unlock_r(&call->master_lock);

	/* re-arm the timer outside of all locks */
	if (next_send.tv_sec)
		timerthread_obj_schedule_abs(&st->tt_obj, &next_send);

	log_info_clear();
}
/* NG-protocol "offer" entry point. Enforces the configured parallel-session
 * cap before delegating to the common offer/answer handler.
 *
 * Returns NULL-equivalent success/error string per call_offer_answer_ng();
 * returns a static rejection string when the session limit is hit. */
const char *call_offer_ng(bencode_item_t *input, struct callmaster *m, bencode_item_t *output, const char* addr, const struct sockaddr_in6 *sin) {
	if (m->conf.max_sessions > 0) {
		int limit_reached;

		/* read-lock is sufficient: we only inspect the table size.
		 * NOTE(review): the check is advisory — a concurrent offer may
		 * still push the count slightly past the limit. */
		rwlock_lock_r(&m->hashlock);
		limit_reached = (g_hash_table_size(m->callhash) >= m->conf.max_sessions);
		rwlock_unlock_r(&m->hashlock);

		if (limit_reached) {
			atomic64_inc(&m->totalstats.total_rejected_sess);
			atomic64_inc(&m->totalstats_interval.total_rejected_sess);
			ilog(LOG_ERROR, "Parallel session limit reached (%i)",m->conf.max_sessions);
			return "Parallel session limit reached";
		}
	}

	return call_offer_answer_ng(input, m, output, OP_OFFER, addr, sin);
}
/* Returns a new reference to the current global DTLS certificate.
 * The read lock guards against a concurrent certificate rotation;
 * the caller owns the returned reference and must obj_put() it. */
struct dtls_cert *dtls_cert() {
	struct dtls_cert *cert;

	rwlock_lock_r(&__dtls_cert_lock);
	cert = obj_get(__dtls_cert);
	rwlock_unlock_r(&__dtls_cert_lock);

	return cert;
}
/* Dedicated ICE worker thread. Repeatedly pops the earliest-due agent from
 * the shared timer tree and runs its connectivity checks, sleeping on a
 * condition variable (100 ms max) when nothing is due.
 *
 * ice_agents_timers_lock is held whenever the tree is touched, and dropped
 * while the (potentially slow) checks run. Each tree entry carries a
 * reference which this loop "steals" and releases via obj_put(). */
void ice_thread_run(void *p) {
	struct ice_agent *ag;
	struct call *call;
	long long sleeptime;
	struct timeval tv;

	mutex_lock(&ice_agents_timers_lock);

	while (!g_shutdown) {
		gettimeofday(&g_now, NULL);

		/* lock our list and get the first element */
		ag = g_tree_find_first(ice_agents_timers, NULL, NULL);

		/* scheduled to run? if not, we just go to sleep, otherwise we remove it from the tree,
		 * steal the reference and run it */
		if (!ag)
			goto sleep;
		if (timeval_cmp(&g_now, &ag->next_check) < 0)
			goto sleep;

		/* cleared next_check marks the agent as not scheduled */
		g_tree_remove(ice_agents_timers, ag);
		ZERO(ag->next_check);
		ag->last_run = g_now;
		mutex_unlock(&ice_agents_timers_lock);

		/* this agent is scheduled to run right now */

		/* lock the call */
		call = ag->call;
		log_info_ice_agent(ag);
		rwlock_lock_r(&call->master_lock);

		/* and run our checks */
		__do_ice_checks(ag);

		/* finally, release our reference and start over */
		log_info_clear();
		rwlock_unlock_r(&call->master_lock);
		obj_put(ag);
		mutex_lock(&ice_agents_timers_lock);
		continue;

sleep:
		/* figure out how long we should sleep */
		sleeptime = ag ? timeval_diff(&ag->next_check, &g_now) : 100000;
		sleeptime = MIN(100000, sleeptime); /* 100 ms at the most */
		tv = g_now;
		timeval_add_usec(&tv, sleeptime);
		/* wakes early when a new agent is scheduled and signals the cond */
		cond_timedwait(&ice_agents_timers_cond, &ice_agents_timers_lock, &tv);
		continue;
	}

	mutex_unlock(&ice_agents_timers_lock);
}
/* Appends up to `limit` call-IDs (the hash keys) to the bencode list
 * `output`. A non-positive limit yields no entries for 0; a negative
 * limit effectively means "all" (the decrement never reaches false
 * before the iterator is exhausted). Keys are duplicated into the
 * bencode buffer, so the table lock can be dropped afterwards. */
void ng_list_calls(
	struct callmaster *m,
	bencode_item_t *output,
	long long int limit) {
	GHashTableIter it;
	gpointer call_id, unused_val;

	rwlock_lock_r(&m->hashlock);
	g_hash_table_iter_init(&it, m->callhash);
	for (; limit-- && g_hash_table_iter_next(&it, &call_id, &unused_val); )
		bencode_list_add_str_dup(output, call_id);
	rwlock_unlock_r(&m->hashlock);
}
/* Timer-thread callback: reads and plays out the next media packet for a
 * scheduled media player. ptr is a struct media_player *; locking order is
 * call->master_lock (read) then the player's own mutex. */
static void media_player_run(void *ptr) {
	struct media_player *player = ptr;
	struct call *owner_call = player->call;

	log_info_call(owner_call);
	ilog(LOG_DEBUG, "running scheduled media_player");

	rwlock_lock_r(&owner_call->master_lock);
	mutex_lock(&player->lock);

	media_player_read_packet(player);

	mutex_unlock(&player->lock);
	rwlock_unlock_r(&owner_call->master_lock);

	log_info_clear();
}
int send_graphite_data(struct totalstats *sent_data) { int rc=0; if (graphite_sock.fd < 0) { ilog(LOG_ERROR,"Graphite socket is not connected."); return -1; } // format hostname "." totals.subkey SPACE value SPACE timestamp char hostname[256]; rc = gethostname(hostname,256); if (rc<0) { ilog(LOG_ERROR, "Could not retrieve host name information."); goto error; } char data_to_send[8192]; char* ptr = data_to_send; struct totalstats *ts = sent_data; /* atomically copy values to stack and reset to zero */ atomic64_local_copy_zero_struct(ts, &cm->totalstats_interval, total_timeout_sess); atomic64_local_copy_zero_struct(ts, &cm->totalstats_interval, total_rejected_sess); atomic64_local_copy_zero_struct(ts, &cm->totalstats_interval, total_silent_timeout_sess); atomic64_local_copy_zero_struct(ts, &cm->totalstats_interval, total_regular_term_sess); atomic64_local_copy_zero_struct(ts, &cm->totalstats_interval, total_forced_term_sess); atomic64_local_copy_zero_struct(ts, &cm->totalstats_interval, total_relayed_packets); atomic64_local_copy_zero_struct(ts, &cm->totalstats_interval, total_relayed_errors); atomic64_local_copy_zero_struct(ts, &cm->totalstats_interval, total_nopacket_relayed_sess); atomic64_local_copy_zero_struct(ts, &cm->totalstats_interval, total_oneway_stream_sess); mutex_lock(&cm->totalstats_interval.total_average_lock); ts->total_average_call_dur = cm->totalstats_interval.total_average_call_dur; ts->total_managed_sess = cm->totalstats_interval.total_managed_sess; ZERO(cm->totalstats_interval.total_average_call_dur); ZERO(cm->totalstats_interval.total_managed_sess); mutex_unlock(&cm->totalstats_interval.total_average_lock); mutex_lock(&cm->totalstats_interval.total_calls_duration_lock); ts->total_calls_duration_interval = cm->totalstats_interval.total_calls_duration_interval; cm->totalstats_interval.total_calls_duration_interval.tv_sec = 0; cm->totalstats_interval.total_calls_duration_interval.tv_usec = 0; 
//ZERO(cm->totalstats_interval.total_calls_duration_interval); mutex_unlock(&cm->totalstats_interval.total_calls_duration_lock); rwlock_lock_r(&cm->hashlock); mutex_lock(&cm->totalstats_interval.managed_sess_lock); ts->managed_sess_max = cm->totalstats_interval.managed_sess_max; ts->managed_sess_min = cm->totalstats_interval.managed_sess_min; cm->totalstats_interval.managed_sess_max = cm->totalstats.managed_sess_crt; cm->totalstats_interval.managed_sess_min = cm->totalstats.managed_sess_crt; mutex_unlock(&cm->totalstats_interval.managed_sess_lock); rwlock_unlock_r(&cm->hashlock); if (graphite_prefix!=NULL) { rc = sprintf(ptr,"%s.",graphite_prefix); ptr += rc; } rc = sprintf(ptr, "%s.totals.call_dur %llu.%06llu %llu\n",hostname,(unsigned long long)ts->total_calls_duration_interval.tv_sec,(unsigned long long)ts->total_calls_duration_interval.tv_usec,(unsigned long long)g_now.tv_sec); ptr += rc; if (graphite_prefix!=NULL) { rc = sprintf(ptr,"%s.",graphite_prefix); ptr += rc; } rc = sprintf(ptr,"%s.totals.average_call_dur %llu.%06llu %llu\n",hostname,(unsigned long long)ts->total_average_call_dur.tv_sec,(unsigned long long)ts->total_average_call_dur.tv_usec,(unsigned long long)g_now.tv_sec); ptr += rc; if (graphite_prefix!=NULL) { rc = sprintf(ptr,"%s.",graphite_prefix); ptr += rc; } rc = sprintf(ptr,"%s.totals.forced_term_sess "UINT64F" %llu\n",hostname, atomic64_get_na(&ts->total_forced_term_sess),(unsigned long long)g_now.tv_sec); ptr += rc; if (graphite_prefix!=NULL) { rc = sprintf(ptr,"%s.",graphite_prefix); ptr += rc; } rc = sprintf(ptr,"%s.totals.managed_sess "UINT64F" %llu\n",hostname, ts->total_managed_sess,(unsigned long long)g_now.tv_sec); ptr += rc; if (graphite_prefix!=NULL) { rc = sprintf(ptr,"%s.",graphite_prefix); ptr += rc; } rc = sprintf(ptr,"%s.totals.managed_sess_min "UINT64F" %llu\n",hostname, ts->managed_sess_min,(unsigned long long)g_now.tv_sec); ptr += rc; if (graphite_prefix!=NULL) { rc = sprintf(ptr,"%s.",graphite_prefix); ptr += rc; } rc = 
sprintf(ptr,"%s.totals.managed_sess_max "UINT64F" %llu\n",hostname, ts->managed_sess_max,(unsigned long long)g_now.tv_sec); ptr += rc; if (graphite_prefix!=NULL) { rc = sprintf(ptr,"%s.",graphite_prefix); ptr += rc; } rc = sprintf(ptr,"%s.totals.nopacket_relayed_sess "UINT64F" %llu\n",hostname, atomic64_get_na(&ts->total_nopacket_relayed_sess),(unsigned long long)g_now.tv_sec); ptr += rc; if (graphite_prefix!=NULL) { rc = sprintf(ptr,"%s.",graphite_prefix); ptr += rc; } rc = sprintf(ptr,"%s.totals.oneway_stream_sess "UINT64F" %llu\n",hostname, atomic64_get_na(&ts->total_oneway_stream_sess),(unsigned long long)g_now.tv_sec); ptr += rc; if (graphite_prefix!=NULL) { rc = sprintf(ptr,"%s.",graphite_prefix); ptr += rc; } rc = sprintf(ptr,"%s.totals.regular_term_sess "UINT64F" %llu\n",hostname, atomic64_get_na(&ts->total_regular_term_sess),(unsigned long long)g_now.tv_sec); ptr += rc; if (graphite_prefix!=NULL) { rc = sprintf(ptr,"%s.",graphite_prefix); ptr += rc; } rc = sprintf(ptr,"%s.totals.relayed_errors "UINT64F" %llu\n",hostname, atomic64_get_na(&ts->total_relayed_errors),(unsigned long long)g_now.tv_sec); ptr += rc; if (graphite_prefix!=NULL) { rc = sprintf(ptr,"%s.",graphite_prefix); ptr += rc; } rc = sprintf(ptr,"%s.totals.relayed_packets "UINT64F" %llu\n",hostname, atomic64_get_na(&ts->total_relayed_packets),(unsigned long long)g_now.tv_sec); ptr += rc; if (graphite_prefix!=NULL) { rc = sprintf(ptr,"%s.",graphite_prefix); ptr += rc; } rc = sprintf(ptr,"%s.totals.silent_timeout_sess "UINT64F" %llu\n",hostname, atomic64_get_na(&ts->total_silent_timeout_sess),(unsigned long long)g_now.tv_sec); ptr += rc; if (graphite_prefix!=NULL) { rc = sprintf(ptr,"%s.",graphite_prefix); ptr += rc; } rc = sprintf(ptr,"%s.totals.timeout_sess "UINT64F" %llu\n",hostname, atomic64_get_na(&ts->total_timeout_sess),(unsigned long long)g_now.tv_sec); ptr += rc; if (graphite_prefix!=NULL) { rc = sprintf(ptr,"%s.",graphite_prefix); ptr += rc; } rc = sprintf(ptr,"%s.totals.reject_sess 
"UINT64F" %llu\n",hostname, atomic64_get_na(&ts->total_rejected_sess),(unsigned long long)g_now.tv_sec); ptr += rc; ilog(LOG_DEBUG, "min_sessions:%llu max_sessions:%llu, call_dur_per_interval:%llu.%06llu at time %llu\n", (unsigned long long) ts->managed_sess_min, (unsigned long long) ts->managed_sess_max, (unsigned long long ) ts->total_calls_duration_interval.tv_sec, (unsigned long long ) ts->total_calls_duration_interval.tv_usec, (unsigned long long ) g_now.tv_sec); rc = write(graphite_sock.fd, data_to_send, ptr - data_to_send); if (rc<0) { ilog(LOG_ERROR,"Could not write to graphite socket. Disconnecting graphite server."); goto error; } return 0; error: close_socket(&graphite_sock); return -1; }