/*
 * Append TURN server status to `mb`: both relay addresses (IPv4/IPv6)
 * and the TX/RX error counters, followed by one line per allocation.
 */
static void status_handler(struct mbuf *mb)
{
	(void)mbuf_printf(mb, "TURN relay=%j relay6=%j (err %llu/%llu)\n",
			  &turnd.rel_addr, &turnd.rel_addr6,
			  turnd.errc_tx, turnd.errc_rx);

	/* walk the allocation table; return values deliberately ignored
	   since this is best-effort status output */
	(void)hash_apply(turnd.ht_alloc, allocation_status, mb);
}
/*
 * Append the channel-list status to `mb`: a "channels:" label followed
 * by one entry per channel number in the hash table.
 *
 * @param cl  Channel list (may be NULL; then nothing is printed)
 * @param mb  Output buffer (may be NULL; then nothing is printed)
 */
void chan_status(const struct chanlist *cl, struct mbuf *mb)
{
	if (!cl || !mb)
		return;

	(void)mbuf_printf(mb, " channels: ");
	(void)hash_apply(cl->ht_numb, status_handler, mb);
	(void)mbuf_printf(mb, "\n");
}
/*
 * Print all SIP client transactions for debugging.
 *
 * @param pf   Print function
 * @param sip  SIP stack instance
 *
 * @return error code from printing the heading (per-transaction output
 *         errors are not propagated)
 */
int sip_ctrans_debug(struct re_printf *pf, const struct sip *sip)
{
	const int err = re_hprintf(pf, "client transactions:\n");

	hash_apply(sip->ht_ctrans, debug_handler, pf);

	return err;
}
/*
 * Rebuild the uplink tables from the list `upl`: allocate a fresh URI
 * hash, add every list entry via apply_add, then sweep data_c with
 * apply_rm (presumably to drop stale entries -- TODO confirm against
 * apply_rm's definition).
 *
 * Fix: the original ignored hash_alloc()'s return value; on allocation
 * failure ups->uris_c stays NULL and apply_add would dereference it.
 */
static void upd(struct tcuplinks *ups, struct list *upl)
{
	if (hash_alloc(&ups->uris_c, 16))
		return;  /* out of memory -- leave tables untouched */

	list_apply(upl, true, apply_add, ups);

	if (ups->data_c)
		hash_apply(ups->data_c, apply_rm, ups);
}
/*
 * Encode all report blocks from the member table (`arg`) into `mb`.
 *
 * @return 0 on success, ENOMEM if any member failed to encode
 */
static int encode_handler(struct mbuf *mb, void *arg)
{
	struct hash *members = arg;

	/* hash_apply returns non-NULL if a handler reported failure */
	return hash_apply(members, sender_apply_handler, mb) ? ENOMEM : 0;
}
static void tcp_estab_handler(void *arg) { DEBUG_INFO("connection established\n"); int ok; struct request * request = arg; struct mbuf *mb; char CN[256]; if(request->secure) { ok = tls_verify_cert(request->ssl, CN, sizeof(CN)); if(ok!=0) goto fail; DEBUG_INFO("https CN %s\n", CN); ok = strcmp(request->host, CN); if(ok!=0) goto fail; } mb = mbuf_alloc(1024); mbuf_printf(mb, "%s %s HTTP/1.1\r\n", request->meth, request->path); mbuf_printf(mb, "Host: %s\r\n", request->host); write_auth(request, mb); mbuf_write_str(mb, "Connection: close\r\n"); hash_apply(request->hdrht, hdr_write, mb); hash_flush(request->hdrht); if(request->post) { request->post->pos = 0; mbuf_printf(mb, "Content-Length: %d\r\n", mbuf_get_left(request->post)); if(request->form) mbuf_printf(mb, "Content-Type: " "application/x-www-form-urlencoded\r\n"); mbuf_printf(mb, "\r\n"); mbuf_write_mem(mb, mbuf_buf(request->post), mbuf_get_left(request->post)); } else { mbuf_write_str(mb, "\r\n"); } mb->pos = 0; tcp_send(request->tcp, mb); mem_deref(mb); return; fail: DEBUG_WARNING("ssl fail %p %d\n", request->app->tls, ok); }
/*
 * DNS client destructor: close all pending queries, then tear down the
 * TCP-connection table, the query table and the UDP socket.
 */
static void dnsc_destructor(void *data)
{
	struct dnsc *dnsc = data;

	/* notify/close pending queries before the tables go away */
	(void)hash_apply(dnsc->ht_query, query_close_handler, NULL);

	hash_flush(dnsc->ht_tcpconn);

	mem_deref(dnsc->ht_tcpconn);
	mem_deref(dnsc->ht_query);
	mem_deref(dnsc->us);
}
/*
 * Merge a child lease back into its parent lease, absorbing the child's
 * fids, claim cache, dir cache and wavefront exits.  After this call the
 * child's tables are detached (set to NULL) and the parent owns them.
 *
 * Preconditions (asserted): both leases are held for update by `worker`,
 * neither has in-flight operations, and neither is an exit stub.
 */
void lease_merge_exit(Worker *worker, Lease *parent, Lease *child)
{
    Claim *claim;

    assert(parent->wait_for_update == worker && child->wait_for_update == worker);
    assert(parent->inflight == 0 && child->inflight == 0);
    assert(!child->isexit && !parent->isexit);
    /* assert(!parent->readonly); assert(!child->readonly); */

    /* prevent lookups on the old lease */
    hash_remove(lease_by_root_pathname, child->pathname);

    /* find the immediate parent and add this child */
    claim = claim_find(worker, dirname(child->pathname));
    assert(claim != NULL && claim->lease == parent);
    claim_link_child(claim, child->claim);

    /* merge the fids
       NOTE(review): hash_set is cast to the iterator signature and used
       to insert each (key, value) pair directly into parent->fids -- the
       cast must match hash_set's actual parameter layout to be valid. */
    hash_apply(child->fids, (void (*)(void *, void *, void *)) hash_set,
            parent->fids);
    child->fids = NULL;

    /* merge the claim cache */
    hash_apply(child->claim_cache,
            (void (*)(void *, void *, void *)) lease_merge_iter_claim_cache,
            parent);
    child->claim_cache = NULL;

    /* merge the dir cache */
    hash_apply(child->dir_cache,
            (void (*)(void *, void *, void *)) lease_merge_iter_dir_cache,
            parent);
    child->dir_cache = NULL;

    /* merge in the wavefront: re-link each of the child's exits, consuming
       the list as we go */
    for ( ; !null(child->wavefront); child->wavefront = cdr(child->wavefront))
        lease_link_exit((Lease *) car(child->wavefront));

    parent->lastchange = now_double();
}
/*
 * Look up response header `name` in the request's header table.
 *
 * On return *rp points at the header value, or stays NULL when the
 * header is not present.  Always returns 0 (callers must test *rp).
 */
int http_response_header(struct request *req, char *name, char **rp)
{
	struct hdr_fetch op;

	*rp = NULL;

	op.name = name;
	op.val = rp;
	hash_apply(req->hdrht, hdr_fetch, &op);

	return 0;
}
struct frame *select_fm (void) { void *end = clock_point_max; if (clock_point != clock_point_init) end = clock_point - PGSIZE; struct frame *fm; for (;clock_point != end; clock_point += PGSIZE){ if (clock_point >= clock_point_max) clock_point = clock_point_init; fm = find_fm(clock_point); if (!fm){ continue; } else if (fm->locked || fm->isPinned){ if (if_fm_accessed(fm)){ hash_apply(&fm->ht_thread_uaddr, set_page_unaccessed); } continue; } else { if (if_fm_accessed(fm)){ hash_apply(&fm->ht_thread_uaddr, set_page_unaccessed); } else { return fm; } } } fm = NULL; while(!fm){ hash_first(&fmt_iter, &frames_table); while(hash_next(&fmt_iter)){ fm = hash_entry(hash_cur(&fmt_iter), struct frame, elem); if (!fm->locked && !fm->isPinned){ return fm; } } cond_wait(&frame_table_cond, &frame_table_lock); } }
/*
 * Snapshot the subtree rooted at `claim`: freeze (copy-on-write) every
 * claim in this lease's cache, recursively snapshot the child leases
 * whose exits fall under claim->pathname, then update each exit's parent
 * directory entry to point at the new remote object id.
 *
 * NOTE(review): the lease is locked exclusively here but never unlocked
 * in this function -- presumably released by the caller or worker
 * teardown; confirm.
 */
void lease_snapshot(Worker *worker, Claim *claim)
{
    List *allexits = claim->lease->wavefront;
    List *exits = NULL;
    List *newoids = NULL;

    lock_lease_exclusive(worker, claim->lease);

    /* start by freezing everything */
    hash_apply(claim->lease->claim_cache,
            (void (*)(void *, void *, void *)) make_claim_cow,
            claim->pathname);

    /* recursively snapshot all the child leases
       (collect only the exits under our subtree; note `exits` is built
       in reverse order of `allexits`) */
    for ( ; !null(allexits); allexits = cdr(allexits)) {
        Lease *lease = car(allexits);
        if (ispathprefix(lease->pathname, claim->pathname))
            exits = cons(lease, exits);
    }
    if (!null(exits))
        newoids = remote_snapshot(worker, exits);

    /* now clone paths to the exits and update the exit parent dirs
       NOTE(review): assumes remote_snapshot returns oids in the same
       order as `exits` -- the two lists are consumed in lock-step */
    while (!null(exits) && !null(newoids)) {
        Lease *exit = car(exits);
        u64 *newoid = car(newoids);
        u64 oldoid;
        Claim *parent;

        parent = claim_find(worker, dirname(exit->pathname));
        claim_thaw(worker, parent);
        oldoid = dir_change_oid(worker, parent, filename(exit->pathname),
                *newoid, ACCESS_WRITEABLE);
        assert(oldoid != NOOID);

        exits = cdr(exits);
        newoids = cdr(newoids);
    }
}
/** * RTCP Debug handler, use with fmt %H * * @param pf Print function * @param rs RTP Socket * * @return 0 if success, otherwise errorcode */ int rtcp_debug(struct re_printf *pf, const struct rtp_sock *rs) { const struct rtcp_sess *sess = rtp_rtcp_sess(rs); int err = 0; if (!sess) return 0; err |= re_hprintf(pf, "----- RTCP Session: -----\n"); err |= re_hprintf(pf, " cname=%s SSRC=0x%08x/%u rx=%uHz\n", sess->cname, rtp_sess_ssrc(sess->rs), rtp_sess_ssrc(sess->rs), sess->srate_rx); hash_apply(sess->members, debug_handler, pf); lock_read_get(sess->lock); err |= re_hprintf(pf, " TX: packets=%u, octets=%u\n", sess->txstat.psent, sess->txstat.osent); lock_rel(sess->lock); return err; }
void output_run_line( const output_type * output , ensemble_type * ensemble) { const int data_columns = vector_get_size( output->keys ); const int data_rows = time_t_vector_size( ensemble->interp_time ); double ** data; int row_nr, column_nr; data = util_calloc( data_rows , sizeof * data ); /* time-direction, i.e. the row index is the first index and the column number (i.e. the different keys) is the second index. */ for (row_nr=0; row_nr < data_rows; row_nr++) data[row_nr] = util_calloc( data_columns , sizeof * data[row_nr] ); printf("Creating output file: %s \n",output->file ); /* Go through all the cases and check that they have this key; exit if missing. Could also ignore the missing keys and just continue; and even defer the checking to the inner loop. */ for (column_nr = 0; column_nr < vector_get_size( output->keys ); column_nr++) { const quant_key_type * qkey = vector_iget( output->keys , column_nr ); { bool OK = true; for (int iens = 0; iens < vector_get_size( ensemble->data ); iens++) { const sum_case_type * sum_case = vector_iget_const( ensemble->data , iens ); if (!ecl_sum_has_general_var(sum_case->ecl_sum , qkey->sum_key)) { OK = false; fprintf(stderr,"** Sorry: the case:%s does not have the summary key:%s \n", ecl_sum_get_case( sum_case->ecl_sum ), qkey->sum_key); } } if (!OK) util_exit("Exiting due to missing summary vector(s).\n"); } } /* The main loop - outer loop is running over time. */ { /** In the quite typical case that we are asking for several quantiles of the quantity, i.e. WWCT:OP_1:0.10 WWCT:OP_1:0.50 WWCT:OP_1:0.90 the interp_data_cache construction will ensure that the underlying ecl_sum object is only queried once; and also the sorting will be performed once. 
*/ hash_type * interp_data_cache = hash_alloc(); for (row_nr = 0; row_nr < data_rows; row_nr++) { time_t interp_time = time_t_vector_iget( ensemble->interp_time , row_nr); for (column_nr = 0; column_nr < vector_get_size( output->keys ); column_nr++) { const quant_key_type * qkey = vector_iget( output->keys , column_nr ); double_vector_type * interp_data; /* Check if we have the vector in the cache table - if not create it. */ if (!hash_has_key( interp_data_cache , qkey->sum_key)) { interp_data = double_vector_alloc(0 , 0); hash_insert_hash_owned_ref( interp_data_cache , qkey->sum_key , interp_data , double_vector_free__); } interp_data = hash_get( interp_data_cache , qkey->sum_key ); /* Check if the vector has data - if not initialize it. */ if (double_vector_size( interp_data ) == 0) { for (int iens = 0; iens < vector_get_size( ensemble->data ); iens++) { const sum_case_type * sum_case = vector_iget_const( ensemble->data , iens ); if ((interp_time >= sum_case->start_time) && (interp_time <= sum_case->end_time)) /* We allow the different simulations to have differing length */ double_vector_append( interp_data , ecl_sum_get_general_var_from_sim_time( sum_case->ecl_sum , interp_time , qkey->sum_key)) ; double_vector_sort( interp_data ); } } data[row_nr][column_nr] = statistics_empirical_quantile__( interp_data , qkey->quantile ); } hash_apply( interp_data_cache , double_vector_reset__ ); } hash_free( interp_data_cache ); } output_save( output , ensemble , (const double **) data); for (row_nr=0; row_nr < data_rows; row_nr++) free( data[row_nr] ); free( data ); }