/*! \brief very inefficient for now - FIXME
 * keep in sync with tcpconn_destroy, the "delete" part should be
 * the same except for io_watch_del..
 * \todo FIXME (very inefficient for now)
 *
 * Walks every partition of the connection id hash and removes
 * connections that are either forced out (force!=0) or unreferenced
 * (refcnt==0) and past their lifetime.  For naturally expired
 * connections the fd is also detached from the reactor and closed.
 */
static inline void __tcpconn_lifetime(int force)
{
	struct tcp_connection *c, *next;
	unsigned int ticks,part;
	unsigned h;
	int fd;

	/* ticks==0 while the ticker is not running yet: the lifetime
	 * comparison below then never triggers, only "force" removals do */
	if (have_ticks())
		ticks=get_ticks();
	else
		ticks=0;

	for( part=0 ; part<TCP_PARTITION_SIZE ; part++ ) {
		TCPCONN_LOCK(part); /* fixme: we can lock only on delete IMO */
		for(h=0; h<TCP_ID_HASH_SIZE; h++){
			c=TCP_PART(part).tcpconn_id_hash[h];
			while(c){
				next=c->id_next; /* save the link before removing c */
				if (force ||((c->refcnt==0) && (ticks>c->lifetime))) {
					if (!force)
						LM_DBG("timeout for hash=%d - %p"
								" (%d > %d)\n", h, c, ticks, c->lifetime);
					fd=c->s;
					_tcpconn_rm(c);
					/* NOTE(review): c is still dereferenced below after
					 * _tcpconn_rm(); assumes _tcpconn_rm() unhooks but does
					 * not immediately free the structure - verify against
					 * its implementation */
					if ((!force)&&(fd>0)&&(c->refcnt==0)) {
						/* detach the fd from the reactor only once */
						if (!(c->flags & F_CONN_REMOVED)){
							reactor_del_all( fd, -1, IO_FD_CLOSING);
							c->flags|=F_CONN_REMOVED;
						}
						close(fd);
					}
					tcp_connections_no--;
				}
				c=next;
			}
		}
		TCPCONN_UNLOCK(part);
	}
}
/*! \brief releases expired connections and cleans up bad ones (state<0)
 *
 * Iterates the local per-process connection list (tcp_conn_lst):
 *  - connections with state<0 are unhooked from the reactor, removed
 *    from the list, closed and released back to tcp_main with CONN_ERROR;
 *  - connections whose timeout has passed are removed the same way and
 *    released with CONN_ERROR (if a message read is in progress) or
 *    CONN_RELEASE otherwise.
 * The exact order (reactor_del_reader -> listrm -> close -> release)
 * must be preserved so tcp_main never sees a half-detached connection.
 */
static inline void tcp_receive_timeout(void)
{
	struct tcp_connection* con;
	struct tcp_connection* next;
	unsigned int ticks;

	ticks=get_ticks();
	for (con=tcp_conn_lst; con; con=next) {
		next=con->c_next; /* safe for removing */
		if (con->state<0){   /* kill bad connections */
			/* S_CONN_BAD or S_CONN_ERROR, remove it */
			/* fd will be closed in tcpconn_release */
			reactor_del_reader(con->fd, -1/*idx*/, IO_FD_CLOSING/*io_flags*/ );
			tcpconn_listrm(tcp_conn_lst, con, c_next, c_prev);
			con->proc_id = -1;
			con->state=S_CONN_BAD;
			if (con->fd!=-1) { close(con->fd); con->fd = -1; }
			tcpconn_release(con, CONN_ERROR,0);
			continue;
		}
		if (con->timeout<=ticks){
			/* expired: hand the connection back to tcp_main */
			LM_DBG("%p expired - (%d, %d) lt=%d\n",
					con, con->timeout, ticks,con->lifetime);
			/* fd will be closed in tcpconn_release */
			reactor_del_reader(con->fd, -1/*idx*/, IO_FD_CLOSING/*io_flags*/ );
			tcpconn_listrm(tcp_conn_lst, con, c_next, c_prev);
			/* connection is going to main */
			con->proc_id = -1;
			if (con->fd!=-1) { close(con->fd); con->fd = -1; }
			/* a partially read message means the peer misbehaved ->
			 * report an error instead of a clean release */
			if (con->msg_attempts)
				tcpconn_release(con, CONN_ERROR,0);
			else
				tcpconn_release(con, CONN_RELEASE,0);
		}
	}
}
/*! \brief very inefficient for now - FIXME
 * keep in sync with tcpconn_destroy, the "delete" part should be
 * the same except for io_watch_del..
 * \todo FIXME (very inefficient for now)
 *
 * Scans the whole connection id hash under the global TCPCONN lock and
 * removes connections that are forced out or unreferenced and past
 * their timeout; TLS connections get a tls_close() first.
 */
static inline void tcpconn_timeout(int force)
{
	struct tcp_connection *c, *next;
	unsigned int ticks;
	unsigned h;
	int fd;

	ticks=get_ticks();
	TCPCONN_LOCK; /* fixme: we can lock only on delete IMO */
	for(h=0; h<TCP_ID_HASH_SIZE; h++){
		c=tcpconn_id_hash[h];
		while(c){
			next=c->id_next; /* save the link before a possible removal */
			if (force ||((c->refcnt==0) && (ticks>c->timeout))) {
				if (!force)
					LM_DBG("timeout for hash=%d - %p"
							" (%d > %d)\n", h, c, ticks, c->timeout);
				fd=c->s;
#ifdef USE_TLS
				/* let TLS send its close-notify before the raw close */
				if (c->type==PROTO_TLS)
					tls_close(c, fd);
#endif
				_tcpconn_rm(c);
				/* NOTE(review): c is dereferenced below after
				 * _tcpconn_rm(); assumes the structure is not freed
				 * there - verify */
				if ((!force)&&(fd>0)&&(c->refcnt==0)) {
					/* detach the fd from the io watcher only once */
					if (!(c->flags & F_CONN_REMOVED)){
						io_watch_del(&io_h, fd, -1, IO_FD_CLOSING);
						c->flags|=F_CONN_REMOVED;
					}
					close(fd);
				}
				tcp_connections_no--;
			}
			c=next;
		}
	}
	TCPCONN_UNLOCK;
}
/*
 * PV accessor for the remaining dialog timeout.
 *
 * Resolution order: the current dialog (read under its lock), then the
 * processing-context stored value, otherwise NULL.  The result is
 * published both as integer and as string in *res.
 *
 * Returns 0 on success, -1 if res is missing.
 */
int pv_get_dlg_timeout(struct sip_msg *msg, pv_param_t *param, pv_value_t *res)
{
	struct dlg_cell *dlg;
	char *buf;
	int remaining = 0;
	int blen = 0;

	if (res == NULL)
		return -1;

	dlg = get_current_dialog();
	if (dlg != NULL) {
		/* read timer state under the per-dialog lock */
		dlg_lock_dlg(dlg);
		if (dlg->state == DLG_STATE_DELETED)
			remaining = 0;
		else if (dlg->state < DLG_STATE_CONFIRMED_NA)
			remaining = dlg->lifetime;
		else
			remaining = dlg->tl.timeout - get_ticks();
		dlg_unlock_dlg(dlg);
	} else if (current_processing_ctx) {
		remaining = ctx_timeout_get();
		if (remaining == 0)
			return pv_get_null( msg, param, res);
	} else {
		return pv_get_null( msg, param, res);
	}

	res->ri = remaining;
	buf = int2str( (unsigned long)res->ri, &blen);
	res->rs.s = buf;
	res->rs.len = blen;
	res->flags = PV_VAL_STR|PV_VAL_INT|PV_TYPE_INT;
	return 0;
}
static int add_write_chunk(struct tcp_connection *con,char *buf,int len, int lock) { struct hep_send_chunk *c; struct hep_data *d = (struct hep_data*)con->proto_data; c = shm_malloc(sizeof(struct hep_send_chunk) + len); if (!c) { LM_ERR("No more SHM\n"); return -1; } c->len = len; c->ticks = get_ticks(); c->buf = (char *)(c+1); memcpy(c->buf,buf,len); c->pos = c->buf; if (lock) lock_get(&con->write_lock); if (d->async_chunks_no == hep_async_max_postponed_chunks) { LM_ERR("We have reached the limit of max async postponed chunks\n"); if (lock) lock_release(&con->write_lock); shm_free(c); return -2; } d->async_chunks[d->async_chunks_no++] = c; if (d->async_chunks_no == 1) d->oldest_chunk = c->ticks; if (lock) lock_release(&con->write_lock); return 0; }
/* Returns: 0 : ACK to a local reply
 *         -1 : error
 *          1 : is not an ACK or a non-local ACK
 *
 * Detects ACKs that answer a locally generated (stateless) reply by
 * matching the To-tag against the locally computed tag and drops them.
 */
int sl_filter_ACK(struct sip_msg *msg, void *bar )
{
	str *tag_str;

	if (msg->first_line.u.request.method_value!=METHOD_ACK)
		goto pass_it;

	/*check the timeout value*/
	if ( *(sl_timeout)<= get_ticks() )
	{
		DBG("DEBUG : sl_filter_ACK: to late to be a local ACK!\n");
		goto pass_it;
	}

	/*force to parse to header -> we need it for tag param*/
	if (parse_headers( msg, HDR_TO_F, 0 )==-1)
	{
		LOG(L_ERR,"ERROR : SL_FILTER_ACK: unable to parse To header\n");
		return -1;
	}

	if (msg->to) {
		tag_str = &(get_to(msg)->tag_value);
		if ( tag_str->len==TOTAG_VALUE_LEN )
		{
			/* calculate the variable part of to-tag */
			/* NOTE(review): calc_crc_suffix() writes into tag_suffix,
			 * which presumably points inside the sl_tag buffer compared
			 * below - verify this aliasing against the module globals */
			calc_crc_suffix(msg, tag_suffix);
			/* test whether to-tag equal now */
			if (memcmp(tag_str->s,sl_tag.s,sl_tag.len)==0) {
				DBG("DEBUG: sl_filter_ACK : local ACK found -> dropping it! \n" );
				return 0;
			}
		}
	}

pass_it:
	return 1;
}
/**
 * Measure the tick count consumed by one TLS CBC decryption
 * (Lucky 13 timing probe).
 *
 * The key/IV/AAD are all-zero dummies; only the elapsed time of
 * m_dec.finish() matters, so decryption failures (bad padding/MAC)
 * are expected and deliberately swallowed.
 *
 * @param input ciphertext bytes fed to the decryptor
 * @return elapsed ticks around m_dec.finish()
 */
ticks Lucky13Test::measure_critical_function(std::vector<byte> input)
   {
   Botan::secure_vector<byte> data(input.begin(), input.end());
   Botan::secure_vector<byte> aad(13);
   const Botan::secure_vector<byte> iv(16);
   Botan::secure_vector<byte> key(16 + m_mac_keylen);

   m_dec.set_key(unlock(key));
   m_dec.set_ad(unlock(aad));
   m_dec.start(unlock(iv));

   ticks start = this->get_ticks();
   try
      {
      m_dec.finish(data);
      }
   catch (const Botan::TLS::TLS_Exception&)
      {
      /* catch by const reference (not by value) to avoid slicing and
       * a needless copy; the exception itself is irrelevant here */
      }
   ticks end = this->get_ticks();

   return (end - start);
   }
/**
 * Create a table entry for a KLD
 *
 * Fills an hrSWRunTable row (entry) from the kernel linker-file status
 * (kfs): name (truncated to SWR_NAME_MLEN), type, run status and
 * memory usage.  The "kernel" file is reported as the operating system
 * itself; everything else as a device driver.
 */
static void kld_file_stat_to_swrun(const struct kld_file_stat *kfs,
    struct swrun_entry *entry)
{
	size_t name_len;

	assert(kfs != NULL);
	assert(entry != NULL);

	/* copy the name, truncated to the table's maximum (incl. NUL) */
	name_len = strlen(kfs->name) + 1;
	if (name_len > SWR_NAME_MLEN)
		name_len = SWR_NAME_MLEN;
	entry->name = reallocf(entry->name, name_len);
	if (entry->name != NULL)
		strlcpy((char *)entry->name, kfs->name, name_len);

	/* FIXME: can we find the location where the module was loaded from? */
	entry->path = NULL;

	/* no parameters for kernel files (.ko) or for the kernel */
	entry->parameters = NULL;

	entry->id = &oid_zeroDotZero; /* unknown id - FIXME */

	if (strcmp(kfs->name, "kernel") == 0) {
		entry->type = (int32_t)SRT_OPERATING_SYSTEM;
		SWOSIndex = entry->index; /* remember the OS row index */
	} else {
		entry->type = (int32_t)SRT_DEVICE_DRIVER; /* well, not really */
	}
	entry->status = (int32_t)SRS_RUNNING;
	entry->perfCPU = 0;			/* Info not available */
	entry->perfMemory = kfs->size / 1024;	/* in kilo-bytes */
	entry->r_tick = get_ticks();
}
/**
 * add a new element in messages queue
 * - jcp : pointer to the Jabber connections pool structure
 * - _jsm : pointer to the message
 * - _ojc : pointer to the Jabber connection that will be used for this message
 * return : 0 on success or <0 on error
 */
int xj_jcon_pool_add_jmsg(xj_jcon_pool jcp, xj_sipmsg _jsm, xj_jcon _ojc)
{
	int slot;

	if(jcp == NULL)
		return -1;

	/* queue is already full */
	if(jcp->jmqueue.size == jcp->jmqueue.len)
		return -2;
#ifdef XJ_EXTRA_DEBUG
	LM_DBG("add msg into the pool\n");
#endif
	/* park the message in the first free slot */
	for(slot = 0; slot < jcp->jmqueue.len; slot++)
	{
		if(jcp->jmqueue.jsm[slot] != NULL && jcp->jmqueue.ojc[slot] != NULL)
			continue;
		jcp->jmqueue.size++;
		jcp->jmqueue.expire[slot] = get_ticks() + jcp->jmqueue.cache;
		jcp->jmqueue.jsm[slot] = _jsm;
		jcp->jmqueue.ojc[slot] = _ojc;
		return 0;
	}
	return -2;
}
void __udelay(unsigned long usec) #endif { ulong tmo, tmp; DBGOUT("+udelay=%d\n", usec); if (! g_inittimer) timer_init(); if (usec >= 1000) { // if "big" number, spread normalization to seconds // tmo = usec / 1000; // start to normalize for usec to ticks per sec // tmo *= TIMER_FREQ; // find number of "ticks" to wait to achieve target // tmo /= 1000; // finish normalize. // } else { // else small number, don't kill it prior to HZ multiply // tmo = usec * TIMER_FREQ; tmo /= (1000*1000); } tmp = get_ticks(); // get current timestamp // DBGOUT("tmo=%d, tmp=%d\n", tmo, tmp); if ( tmp > (tmo + tmp + 1) ) // if setting this fordward will roll time stamp // reset_timer_masked(); // reset "advancing" timestamp to 0, set lastdec value // else tmo += tmp; // else, set advancing stamp wake up time // while (tmo > get_timer_masked ())// loop till event // { //*NOP*/; } DBGOUT("-udelay=%d\n", usec); return; }
/* Current timer value: the raw tick counter converted to time units. */
ulong get_timer_masked(void)
{
	return tick_to_time(get_ticks());
}
/*
 * Reload all B2B logic tuples from the database at startup.
 *
 * Uses row-fetching when the DB driver supports it (DB_CAP_FETCH),
 * otherwise one full query.  Each row is unpacked into a stack-local
 * b2bl_tuple_t (strings point straight into the DB result, so
 * b2bl_add_tuple() must copy what it keeps) and inserted via
 * b2bl_add_tuple().  Returns 0 on success, -1 on error.
 */
int b2b_logic_restore(void)
{
	int i;
	int nr_rows;
	int _time;
	db_res_t *result= NULL;
	db_row_t *rows = NULL;
	db_val_t *row_vals= NULL;
	b2bl_tuple_t tuple;
	str b2bl_key;
	str scenario_id;
	b2bl_entity_id_t bridge_entities[3];
	str* params[MAX_SCENARIO_PARAMS];

	if(b2bl_db == NULL)
	{
		LM_DBG("NULL database connection\n");
		return 0;
	}
	if(b2bl_dbf.use_table(b2bl_db, &b2bl_dbtable)< 0)
	{
		LM_ERR("sql use table failed\n");
		return -1;
	}

	/* query all columns; fetch in batches if supported */
	if (DB_CAPABILITY(b2bl_dbf, DB_CAP_FETCH))
	{
		if(b2bl_dbf.query(b2bl_db, 0, 0, 0, qcols, 0, DB_COLS_NO, 0, 0) < 0)
		{
			LM_ERR("Error while querying (fetch) database\n");
			return -1;
		}
		if(b2bl_dbf.fetch_result(b2bl_db,&result,B2BL_FETCH_SIZE)<0)
		{
			LM_ERR("fetching rows failed\n");
			return -1;
		}
	}
	else
	{
		if (b2bl_dbf.query(b2bl_db, 0, 0, 0, qcols, 0, DB_COLS_NO, 0,
					&result) < 0)
		{
			LM_ERR("querying presentity\n");
			return -1;
		}
	}
	nr_rows = RES_ROW_N(result);

	do
	{
		LM_DBG("loading [%i] records from db\n", nr_rows);
		rows = RES_ROWS(result);
		/* for every row */
		for(i=0; i<nr_rows; i++)
		{
			row_vals = ROW_VALUES(rows +i);
			memset(&tuple, 0, sizeof(b2bl_tuple_t));

			/* column 0: tuple key */
			b2bl_key.s = (char*)row_vals[0].val.string_val;
			b2bl_key.len = b2bl_key.s?strlen(b2bl_key.s):0;
			tuple.key = &b2bl_key;
			/* column 1: scenario id */
			if(row_vals[1].val.string_val)
			{
				scenario_id.s = (char*)row_vals[1].val.string_val;
				scenario_id.len = strlen(scenario_id.s);
				tuple.scenario = get_scenario_id(&scenario_id);
			}
			memset(bridge_entities, 0, 3*sizeof(b2bl_entity_id_t));
			memset(params, 0, MAX_SCENARIO_PARAMS* sizeof(str*));
			/* columns 2..6: up to five scenario parameters */
			if(row_vals[2].val.string_val)
			{
				tuple.scenario_params[0].s =(char*)row_vals[2].val.string_val;
				tuple.scenario_params[0].len =
					strlen(tuple.scenario_params[0].s);
				params[0] = &tuple.scenario_params[0];
			}
			if(row_vals[3].val.string_val)
			{
				tuple.scenario_params[1].s =(char*)row_vals[3].val.string_val;
				tuple.scenario_params[1].len =
					strlen(tuple.scenario_params[1].s);
				params[1] = &tuple.scenario_params[1];
			}
			if(row_vals[4].val.string_val)
			{
				tuple.scenario_params[2].s =(char*)row_vals[4].val.string_val;
				tuple.scenario_params[2].len =
					strlen(tuple.scenario_params[2].s);
				params[2] = &tuple.scenario_params[2];
			}
			if(row_vals[5].val.string_val)
			{
				tuple.scenario_params[3].s =(char*)row_vals[5].val.string_val;
				tuple.scenario_params[3].len =
					strlen(tuple.scenario_params[3].s);
				params[3] = &tuple.scenario_params[3];
			}
			if(row_vals[6].val.string_val)
			{
				tuple.scenario_params[4].s =(char*)row_vals[6].val.string_val;
				tuple.scenario_params[4].len =
					strlen(tuple.scenario_params[4].s);
				params[4] = &tuple.scenario_params[4];
			}
			/* column 7: stored SDP */
			if(row_vals[7].val.string_val)
			{
				tuple.sdp.s =(char*)row_vals[7].val.string_val;
				tuple.sdp.len = strlen(tuple.sdp.s);
			}
			tuple.scenario_state =row_vals[8].val.int_val;
			tuple.next_scenario_state=row_vals[9].val.int_val;

			/* column 10 stores an absolute wall-clock expiry; convert it
			 * back to a ticks-based lifetime (already expired -> 1) */
			_time = (int)time(NULL);
			if (row_vals[10].val.int_val <= _time)
				tuple.lifetime = 1;
			else
				tuple.lifetime=row_vals[10].val.int_val - _time + get_ticks();

			/* columns 11..15: bridge entity 0 */
			bridge_entities[0].type = row_vals[11].val.int_val;
			bridge_entities[0].scenario_id.s =(char*)row_vals[12].val.string_val;
			bridge_entities[0].scenario_id.len=
				bridge_entities[0].scenario_id.s?
				strlen(bridge_entities[0].scenario_id.s):0;
			bridge_entities[0].to_uri.s =(char*)row_vals[13].val.string_val;
			bridge_entities[0].to_uri.len=
				bridge_entities[0].to_uri.s?
				strlen(bridge_entities[0].to_uri.s):0;
			bridge_entities[0].from_uri.s=(char*)row_vals[14].val.string_val;
			bridge_entities[0].from_uri.len=
				bridge_entities[0].from_uri.s?
				strlen(bridge_entities[0].from_uri.s):0;
			bridge_entities[0].key.s =(char*)row_vals[15].val.string_val;
			bridge_entities[0].key.len=
				bridge_entities[0].key.s?
				strlen(bridge_entities[0].key.s):0;

			/* columns 16..20: bridge entity 1 */
			bridge_entities[1].type = row_vals[16].val.int_val;
			bridge_entities[1].scenario_id.s = (char*)row_vals[17].val.string_val;
			bridge_entities[1].scenario_id.len=
				bridge_entities[1].scenario_id.s?
				strlen(bridge_entities[1].scenario_id.s):0;
			bridge_entities[1].to_uri.s = (char*)row_vals[18].val.string_val;
			bridge_entities[1].to_uri.len=
				bridge_entities[1].to_uri.s?
				strlen(bridge_entities[1].to_uri.s):0;
			bridge_entities[1].from_uri.s = (char*)row_vals[19].val.string_val;
			bridge_entities[1].from_uri.len=
				bridge_entities[1].from_uri.s?
				strlen(bridge_entities[1].from_uri.s):0;
			bridge_entities[1].key.s = (char*)row_vals[20].val.string_val;
			bridge_entities[1].key.len=
				bridge_entities[1].key.s?
				strlen(bridge_entities[1].key.s):0;

			/* columns 21..25: optional bridge entity 2 */
			/* NOTE(review): column 21 is tested via .string_val but read
			 * via .int_val - verify the column type / union access */
			if(row_vals[21].val.string_val)
			{
				bridge_entities[2].type = row_vals[21].val.int_val;
				bridge_entities[2].scenario_id.s =
					(char*)row_vals[22].val.string_val;
				bridge_entities[2].scenario_id.len=
					bridge_entities[2].scenario_id.s?
					strlen(bridge_entities[2].scenario_id.s):0;
				bridge_entities[2].to_uri.s = (char*)row_vals[23].val.string_val;
				bridge_entities[2].to_uri.len=
					bridge_entities[2].to_uri.s?
					strlen(bridge_entities[2].to_uri.s):0;
				bridge_entities[2].from_uri.s =
					(char*)row_vals[24].val.string_val;
				bridge_entities[2].from_uri.len=
					bridge_entities[2].from_uri.s?
					strlen(bridge_entities[2].from_uri.s):0;
				bridge_entities[2].key.s = (char*)row_vals[25].val.string_val;
				bridge_entities[2].key.len=
					bridge_entities[2].key.s?
					strlen(bridge_entities[2].key.s):0;
			}

			tuple.bridge_entities[0] = &bridge_entities[0];
			tuple.bridge_entities[1] = &bridge_entities[1];
			tuple.bridge_entities[2] = &bridge_entities[2];

			if(b2bl_add_tuple(&tuple, params) < 0)
			{
				LM_ERR("Failed to add new tuple\n");
				goto error;
			}
		}
		/* any more data to be fetched ?*/
		if (DB_CAPABILITY(b2bl_dbf, DB_CAP_FETCH))
		{
			if (b2bl_dbf.fetch_result( b2bl_db, &result, B2BL_FETCH_SIZE ) < 0)
			{
				LM_ERR("fetching more rows failed\n");
				goto error;
			}
			nr_rows = RES_ROW_N(result);
		}
		else
		{
			nr_rows = 0;
		}
	}while (nr_rows>0);

	b2bl_dbf.free_result(b2bl_db, result);
	LM_DBG("Finished\n");

	return 0;

error:
	if(result)
		b2bl_dbf.free_result(b2bl_db, result);
	return -1;
}
/*
 * Flush dirty B2B logic tuples to the database.
 *
 * Walks every hash bucket (locking it unless no_lock is set) and, for
 * each tuple whose db_flag is not NO_UPDATEDB_FLAG, either INSERTs
 * (INSERTDB_FLAG: all columns) or UPDATEs (only the non-key columns).
 * On any SQL failure the function releases the current bucket lock and
 * returns immediately, leaving remaining tuples un-flushed.
 */
void b2b_logic_dump(int no_lock)
{
	b2bl_tuple_t* tuple;
	int i;
	int n_insert_cols;

	if(b2bl_dbf.use_table(b2bl_db, &b2bl_dbtable)< 0)
	{
		LM_ERR("sql use table failed\n");
		return;
	}

	for(i = 0; i< b2bl_hsize; i++)
	{
		if(!no_lock)
			lock_get(&b2bl_htable[i].lock);
		tuple = b2bl_htable[i].first;
		while(tuple)
		{
			/* check the state of the scenario instantiation */
			if(tuple->db_flag == NO_UPDATEDB_FLAG)
				goto next;
			if(tuple->key == NULL)
			{
				LM_ERR("No key stored\n");
				goto next;
			}
			if(tuple->bridge_entities[0]==NULL ||
					tuple->bridge_entities[1]== NULL)
			{
				LM_ERR("Bridge entities is NULL\n");
				if(tuple->bridge_entities[0]==NULL)
					LM_DBG("0 NULL\n");
				else
					LM_DBG("1 NULL\n");
				goto next;
			}

			qvals[0].val.str_val = *tuple->key;
			/* columns 1..7 are only written on insert */
			if(tuple->db_flag == INSERTDB_FLAG)
			{
				if(tuple->scenario)
					qvals[1].val.str_val = tuple->scenario->id;
				else
				{
					qvals[1].val.str_val.len = 0;
					qvals[1].val.str_val.s = "";
				}
				qvals[2].val.str_val = tuple->scenario_params[0];
				qvals[3].val.str_val = tuple->scenario_params[1];
				qvals[4].val.str_val = tuple->scenario_params[2];
				qvals[5].val.str_val = tuple->scenario_params[3];
				qvals[6].val.str_val = tuple->scenario_params[4];
				qvals[7].val.str_val = tuple->sdp;
			}
			qvals[8].val.int_val = tuple->scenario_state;
			qvals[9].val.int_val = tuple->next_scenario_state;
			/* store the expiry as absolute wall-clock time (mirrors the
			 * reverse conversion done in b2b_logic_restore) */
			qvals[10].val.int_val =
				tuple->lifetime - get_ticks() + (int)time(NULL);
			qvals[11].val.int_val = tuple->bridge_entities[0]->type;
			qvals[12].val.str_val = tuple->bridge_entities[0]->scenario_id;
			qvals[13].val.str_val = tuple->bridge_entities[0]->to_uri;
			qvals[14].val.str_val = tuple->bridge_entities[0]->from_uri;
			qvals[15].val.str_val = tuple->bridge_entities[0]->key;
			qvals[16].val.int_val = tuple->bridge_entities[1]->type;
			qvals[17].val.str_val = tuple->bridge_entities[1]->scenario_id;
			qvals[18].val.str_val = tuple->bridge_entities[1]->to_uri;
			qvals[19].val.str_val = tuple->bridge_entities[1]->from_uri;
			qvals[20].val.str_val = tuple->bridge_entities[1]->key;
			n_insert_cols = 21;

			if(tuple->bridge_entities[2])
			{
				qvals[21].val.int_val = tuple->bridge_entities[2]->type;
				qvals[22].val.str_val = tuple->bridge_entities[2]->scenario_id;
				qvals[23].val.str_val = tuple->bridge_entities[2]->to_uri;
				qvals[24].val.str_val = tuple->bridge_entities[2]->from_uri;
				qvals[25].val.str_val = tuple->bridge_entities[2]->key;
			}
			/* NOTE(review): this unconditionally overwrites the 21 set
			 * above, so all DB_COLS_NO columns are always written even
			 * without a third bridge entity - confirm this is intended */
			n_insert_cols = DB_COLS_NO;

			/* insert into database */
			if(tuple->db_flag == INSERTDB_FLAG)
			{
				if(b2bl_dbf.insert(b2bl_db, qcols, qvals, n_insert_cols)< 0)
				{
					LM_ERR("Sql insert failed\n");
					if(!no_lock)
						lock_release(&b2bl_htable[i].lock);
					return;
				}
			}
			else
			{
				/*do update */
				if(b2bl_dbf.update(b2bl_db, qcols, 0, qvals,
						qcols+n_query_update, qvals+n_query_update, 1,
						DB_COLS_NO - n_query_update)< 0)
				{
					LM_ERR("Sql update failed\n");
					if(!no_lock)
						lock_release(&b2bl_htable[i].lock);
					return;
				}
			}
			tuple->db_flag = NO_UPDATEDB_FLAG;
next:
			tuple = tuple->next;
		}
		if(!no_lock)
			lock_release(&b2bl_htable[i].lock);
	}
}
/* Clocksource read callback: report the free-running tick counter. */
uint64_t ppc_clocksource_read(void)
{
	uint64_t now = get_ticks();

	return now;
}
// Arm the timer: remember the requested period and the current tick count.
void Timer::start(unsigned int period_)
{
	period = period_;
	time = get_ticks();
}
// Ticks elapsed since start() was last called.
int Timer::get_gone()
{
	int elapsed = get_ticks() - time;
	return elapsed;
}
/**
 * check if there are msg to send or delete from queue
 *
 * For each occupied slot of the worker's message queue:
 *  - half-empty slots (msg without connection or vice versa) are freed;
 *  - expired messages trigger an error SIP reply and are dropped;
 *  - otherwise, if the target Jabber connection is ready, the address
 *    is translated SIP->Jabber and the message is sent (as groupchat
 *    when it targets a conference), then removed from the queue.
 */
void xj_worker_check_qmsg(xj_wlist jwl, xj_jcon_pool jcp)
{
	int i, flag;
	str sto;
	char buff[1024];

	if(!jwl || !jcp)
		return;

	/** check the msg queue AND if the target connection is ready */
	for(i = 0; i<jcp->jmqueue.size && main_loop; i++)
	{
		/* clean up inconsistent slots (message or connection missing) */
		if(jcp->jmqueue.jsm[i]==NULL || jcp->jmqueue.ojc[i]==NULL)
		{
			if(jcp->jmqueue.jsm[i]!=NULL)
			{
				xj_sipmsg_free(jcp->jmqueue.jsm[i]);
				jcp->jmqueue.jsm[i] = NULL;
				xj_jcon_pool_del_jmsg(jcp, i);
			}
			if(jcp->jmqueue.ojc[i]!=NULL)
				xj_jcon_pool_del_jmsg(jcp, i);
			continue;
		}
		/* expired: notify the SIP side and drop the message */
		if(jcp->jmqueue.expire[i] < get_ticks())
		{
#ifdef XJ_EXTRA_DEBUG
			DBG("XJAB:xj_worker_check_qmsg:%d: message to %.*s is expired\n",
				_xj_pid, jcp->jmqueue.jsm[i]->to.len,
				jcp->jmqueue.jsm[i]->to.s);
#endif
			xj_send_sip_msgz(_PADDR(jwl), jcp->jmqueue.jsm[i]->jkey->id,
					&jcp->jmqueue.jsm[i]->to, XJ_DMSG_ERR_SENDIM,
					&jcp->jmqueue.ojc[i]->jkey->flag);
			if(jcp->jmqueue.jsm[i]!=NULL)
			{
				xj_sipmsg_free(jcp->jmqueue.jsm[i]);
				jcp->jmqueue.jsm[i] = NULL;
			}
			/** delete message from queue */
			xj_jcon_pool_del_jmsg(jcp, i);
			continue;
		}

#ifdef XJ_EXTRA_DEBUG
		DBG("XJAB:xj_worker_check_qmsg:%d:%d: QUEUE: message[%d] from [%.*s]"
			"/to [%.*s]/body[%.*s] expires at %d\n",
			_xj_pid, get_ticks(), i,
			jcp->jmqueue.jsm[i]->jkey->id->len,
			jcp->jmqueue.jsm[i]->jkey->id->s,
			jcp->jmqueue.jsm[i]->to.len,jcp->jmqueue.jsm[i]->to.s,
			jcp->jmqueue.jsm[i]->msg.len,jcp->jmqueue.jsm[i]->msg.s,
			jcp->jmqueue.expire[i]);
#endif
		/* connection not ready yet -> keep the message queued */
		if(xj_jcon_is_ready(jcp->jmqueue.ojc[i], jcp->jmqueue.jsm[i]->to.s,
				jcp->jmqueue.jsm[i]->to.len, jwl->aliases->dlm))
			continue;

		/*** address correction ***/
		flag = XJ_ADDRTR_S2J;
		if(!xj_jconf_check_addr(&jcp->jmqueue.jsm[i]->to,jwl->aliases->dlm))
			flag |= XJ_ADDRTR_CON; /* destination is a conference */

		sto.s = buff;
		sto.len = 0;
		if(xj_address_translation(&jcp->jmqueue.jsm[i]->to,
			&sto, jwl->aliases, flag) == 0)
		{
			/** send message from queue */
#ifdef XJ_EXTRA_DEBUG
			DBG("XJAB:xj_worker_check_qmsg:%d: SENDING the message from"
				" local queue to Jabber network ...\n", _xj_pid);
#endif
			xj_jcon_send_msg(jcp->jmqueue.ojc[i],
				sto.s, sto.len,
				jcp->jmqueue.jsm[i]->msg.s,
				jcp->jmqueue.jsm[i]->msg.len,
				(flag&XJ_ADDRTR_CON)?XJ_JMSG_GROUPCHAT:XJ_JMSG_CHAT);
		}
		else
			DBG("XJAB:xj_worker_check_qmsg:%d: ERROR SENDING the message from"
				" local queue to Jabber network ...\n", _xj_pid);

		if(jcp->jmqueue.jsm[i]!=NULL)
		{
			xj_sipmsg_free(jcp->jmqueue.jsm[i]);
			jcp->jmqueue.jsm[i] = NULL;
		}
		/** delete message from queue */
		xj_jcon_pool_del_jmsg(jcp, i);
	}
}
/*
 * Decompress boot-logo number 'index' from the logo database and blit
 * it to the framebuffer, applying the panel's physical rotation
 * (MTK_LCM_PHYSICAL_ROTATION: "270", "90" or none) and 32-pixel line
 * alignment.  The compressed image is first expanded into a scratch
 * area at fb_addr + 2*fb_size, then copied/rotated into fb_addr.
 *
 * Logo DB layout (assumed from the accesses below - verify):
 *   pinfo[0] = number of logos, pinfo[2+i] = offset of logo i,
 *   so pinfo[3+i]-pinfo[2+i] is the compressed length of logo i.
 */
static void show_logo(UINT32 index)
{
	UINT32 logonum;
	UINT32 logolen;
	UINT32 inaddr;
	void *fb_addr = mt65xx_get_fb_addr();
	UINT32 fb_size = mt65xx_get_fb_size();
	void *db_addr = mt65xx_get_logo_db_addr();
	unsigned int *pinfo = (unsigned int*)db_addr;

	logonum = pinfo[0];
	ASSERT(index < logonum);

	if(index < logonum)
		logolen = pinfo[3+index] - pinfo[2+index];
	else
		/* NOTE(review): dead branch after the ASSERT above, and it
		 * still indexes pinfo[2+index] with the bad index - verify */
		logolen = pinfo[1] - pinfo[2+index];

	inaddr = (unsigned int)db_addr+pinfo[2+index];

	/* NOTE(review): %x/%d used for a pointer and unsigned values -
	 * verify the format specifiers against this printf implementation */
	printf("show_logo, in_addr=0x%08x, fb_addr=0x%08x, logolen=%d, ticks=%d\n",
		inaddr, fb_addr, logolen, get_ticks());

//    mt65xx_logo_decompress((void*)inaddr, (void*)fb_addr + 2 * fb_size, logolen, fb_size);
#if 1
	{
		unsigned short *d;
		int j,k;

		if(0 == strncmp(MTK_LCM_PHYSICAL_ROTATION, "270", 3))
		{
			/* rotate 270 degrees: walk columns, write rows bottom-up */
			unsigned int l;
			unsigned short *s;
			unsigned int width = CFG_DISPLAY_WIDTH;
			unsigned int height = CFG_DISPLAY_HEIGHT;

			mt65xx_logo_decompress((void*)inaddr,
				(void*)fb_addr + 2 * fb_size, logolen, fb_size);
			s = fb_addr + 2 * fb_size;
			for (j=0; j<width; j++){
				for (k=0, l=height-1; k<height; k++, l--)
				{
					/* destination line pitch is width aligned to 32,
					 * 2 bytes per pixel (RGB565) */
					d = fb_addr + ((ALIGN_TO(width, 32) * l + j) << 1);
					*d = *s++;
				}
			}
		}
		else if(0 == strncmp(MTK_LCM_PHYSICAL_ROTATION, "90", 2))
		{
			/* rotate 90 degrees: walk columns right-to-left */
			unsigned int l;
			unsigned short *s;
			unsigned int width = CFG_DISPLAY_WIDTH;
			unsigned int height = CFG_DISPLAY_HEIGHT;

			mt65xx_logo_decompress((void*)inaddr,
				(void*)fb_addr + 2 * fb_size, logolen, fb_size);
			s = fb_addr + 2 * fb_size;
			for (j=width - 1; j>=0; j--){
				for (k=0, l=0; k<height; k++, l++)
				{
					d = fb_addr + ((ALIGN_TO(width, 32) * l + j) << 1);
					*d = *s++;
				}
			}
		}
		else
#endif
		{
			/* no rotation: either re-pitch each line to the 32-pixel
			 * alignment, or decompress straight into the framebuffer */
			if(0 != CFG_DISPLAY_WIDTH % 32){
				unsigned short *s;
				unsigned short *d;
				unsigned int width = CFG_DISPLAY_WIDTH;
				unsigned int height = CFG_DISPLAY_HEIGHT;

				mt65xx_logo_decompress((void*)inaddr,
					(void*)fb_addr + 2 * fb_size, logolen, fb_size);
				s = fb_addr + 2 * fb_size;
				d = fb_addr;
				for (j=0;j < height; j++){
					{
						memcpy(d, s, width * 2);
						d += ALIGN_TO(width, 32);
						s += width;
					}
				}
			}
			else{
				mt65xx_logo_decompress((void*)inaddr, (void*)fb_addr,
					logolen, fb_size);
			}
		}
	}
	printf("ticks=%d\n", get_ticks());
}
/* Current timer value scaled from raw ticks to CONFIG_SYS_HZ units. */
ulong get_timer_masked(void)
{
	unsigned long long ticks = get_ticks();

	/* do_div() divides in place and leaves the quotient in 'ticks' */
	do_div (ticks, (timer_load_val / (100 * CONFIG_SYS_HZ)));
	return ticks;
}
/*
 * Main TCP dispatcher loop (runs forever in the tcp_main process).
 *
 * Responsibilities per select() round:
 *  - accept new connections on the TCP (and TLS) listen sockets;
 *  - hand readable established connections to a tcp reader child via
 *    send2child() (the fd leaves the select set while the child owns it);
 *  - process control messages from the tcp reader children
 *    (CONN_RELEASE / CONN_ERROR / CONN_DESTROY / CONN_EOF);
 *  - process control messages from the other SER processes
 *    (CONN_ERROR / CONN_GET_FD / CONN_NEW, with fd passing);
 *  - expire idle connections via tcpconn_timeout().
 */
void tcp_main_loop()
{
	int r;
	int n;
	fd_set master_set;
	fd_set sel_set;
	int maxfd;
	struct tcp_connection* tcpconn;
	unsigned h;
	long response[2];
	int cmd;
	int bytes;
	struct timeval timeout;
	int fd;

	/*init */
	maxfd=0;
	FD_ZERO(&master_set);
	/* set all the listen addresses */
	for (r=0; r<sock_no; r++){
		if ((tcp_info[r].proto==PROTO_TCP) &&(tcp_info[r].socket!=-1)){
			FD_SET(tcp_info[r].socket, &master_set);
			if (tcp_info[r].socket>maxfd)
				maxfd=tcp_info[r].socket;
		}
#ifdef USE_TLS
		if ((!tls_disable)&&(tls_info[r].proto==PROTO_TLS) &&
				(tls_info[r].socket!=-1)){
			FD_SET(tls_info[r].socket, &master_set);
			if (tls_info[r].socket>maxfd)
				maxfd=tls_info[r].socket;
		}
#endif
	}
	/* set all the unix sockets used for child comm */
	for (r=1; r<process_no; r++){
		if (pt[r].unix_sock>0){ /* we can't have 0, we never close it!*/
			FD_SET(pt[r].unix_sock, &master_set);
			if (pt[r].unix_sock>maxfd) maxfd=pt[r].unix_sock;
		}
	}
	for (r=0; r<tcp_children_no; r++){
		if (tcp_children[r].unix_sock>0){ /* we can't have 0,
											 we never close it!*/
			FD_SET(tcp_children[r].unix_sock, &master_set);
			if (tcp_children[r].unix_sock>maxfd)
				maxfd=tcp_children[r].unix_sock;
		}
	}

	/* main loop*/
	while(1){
		sel_set=master_set;
		timeout.tv_sec=TCP_MAIN_SELECT_TIMEOUT;
		timeout.tv_usec=0;
		n=select(maxfd+1, &sel_set, 0 ,0 , &timeout);
		if (n<0){
			if (errno==EINTR) continue; /* just a signal */
			/* errors */
			LOG(L_ERR, "ERROR: tcp_main_loop: select:(%d) %s\n", errno,
					strerror(errno));
			n=0;
		}
		/* new incoming connections first; n counts down as fds are
		 * consumed so later scans stop early */
		for (r=0; r<sock_no && n; r++){
			handle_new_connect(&tcp_info[r], &sel_set, &n);
#ifdef USE_TLS
			if (!tls_disable)
				handle_new_connect(&tls_info[r], &sel_set, &n);
#endif
		}

		/* check all the read fds (from the tcpconn_addr_hash ) */
		for (h=0; h<TCP_ADDR_HASH_SIZE; h++){
			for(tcpconn=tcpconn_addr_hash[h]; tcpconn && n;
					tcpconn=tcpconn->next){
				/* FIXME: is refcnt==0 really necessary? */
				if ((tcpconn->refcnt==0)&&(FD_ISSET(tcpconn->s, &sel_set))){
					/* new data available */
					n--;
					/* pass it to child, so remove it from select list */
					DBG("tcp_main_loop: data available on %p [h:%d] %d\n",
							tcpconn, h, tcpconn->s);
					FD_CLR(tcpconn->s, &master_set);
					tcpconn_ref(tcpconn); /* refcnt ++ */
					if (send2child(tcpconn)<0){
						LOG(L_ERR,"ERROR: tcp_main_loop: no "
									"children available\n");
						/* undo the ref; drop or force-expire the conn */
						TCPCONN_LOCK;
						tcpconn->refcnt--;
						if (tcpconn->refcnt==0){
							fd=tcpconn->s;
							_tcpconn_rm(tcpconn);
							close(fd);
						}else tcpconn->timeout=0; /* force expire*/
						TCPCONN_UNLOCK;
					}
				}
			}
		}

		/* check unix sockets & listen | destroy connections */
		/* tcp_children readers first */
		for (r=0; r<tcp_children_no && n; r++){
			if ( (tcp_children[r].unix_sock>0) &&
					FD_ISSET(tcp_children[r].unix_sock, &sel_set)){
				/* (we can't have a fd==0, 0 is never closed )*/
				n--;
				/* read until sizeof(response)
				 * (this is a SOCK_STREAM so read is not atomic */
				bytes=recv_all(tcp_children[r].unix_sock, response,
						sizeof(response));
				if (bytes==0){
					/* EOF -> bad, child has died */
					DBG("DBG: tcp_main_loop: dead tcp child %d"
							" (shutting down?)\n", r);
					/* don't listen on it any more */
					FD_CLR(tcp_children[r].unix_sock, &master_set);
					/*exit(-1);*/
					continue; /* skip this and try the next one */
				}else if (bytes<0){
					LOG(L_CRIT, "ERROR: tcp_main_loop: read from tcp child %d "
							"%s\n", r, strerror(errno));
					/* try to ignore ? */
					continue; /* skip this and try the next one */
				}
				/* response[0] carries the connection pointer,
				 * response[1] the command */
				DBG("tcp_main_loop: reader response= %lx, %ld from %d \n",
						response[0], response[1], r);
				cmd=response[1];
				tcpconn=(struct tcp_connection*)response[0];
				switch(cmd){
					case CONN_RELEASE:
						/* child is done with the conn: re-arm it for
						 * select unless it went bad meanwhile */
						tcp_children[r].busy--;
						if (tcpconn){
							if (tcpconn->state==S_CONN_BAD){
								tcpconn_destroy(tcpconn);
								break;
							}
							FD_SET(tcpconn->s, &master_set);
							if (maxfd<tcpconn->s) maxfd=tcpconn->s;
							/* update the timeout*/
							tcpconn->timeout=get_ticks()+TCP_CON_TIMEOUT;
							tcpconn_put(tcpconn);
							DBG("tcp_main_loop: CONN_RELEASE  %p"
									" refcnt= %d\n",
									tcpconn, tcpconn->refcnt);
						}
						break;
					case CONN_ERROR:
					case CONN_DESTROY:
					case CONN_EOF:
						/* WARNING: this will auto-dec. refcnt! */
						/* NOTE(review): indexes tcp_children via
						 * pt[r].idx while CONN_RELEASE above uses r
						 * directly - verify which indexing is right */
						tcp_children[pt[r].idx].busy--;
						if (tcpconn){
							if (tcpconn->s!=-1)
								FD_CLR(tcpconn->s, &master_set);
							tcpconn_destroy(tcpconn);
						}
						break;
					default:
						LOG(L_CRIT, "BUG: tcp_main_loop:  unknown cmd %d"
								" from tcp reader %d\n", cmd, r);
				}
			}
		}

		/* check  "send" unix sockets & listen | destroy connections */
		/* start from 1, the "main" process does not transmit anything*/
		for (r=1; r<process_no && n; r++){
			if ( (pt[r].unix_sock>0) &&
					FD_ISSET(pt[r].unix_sock, &sel_set)){
				/* (we can't have a fd==0, 0 is never closed )*/
				n--;
				/* read until sizeof(response)
				 * (this is a SOCK_STREAM so read is not atomic */
				bytes=recv_all(pt[r].unix_sock, response, sizeof(response));
				if (bytes==0){
					/* EOF -> bad, child has died */
					DBG("DBG: tcp_main_loop: dead child %d"
							" (shutting down?)\n", r);
					/* don't listen on it any more */
					FD_CLR(pt[r].unix_sock, &master_set);
					/*exit(-1);*/
					continue; /* skip this and try the next one */
				}else if (bytes<0){
					LOG(L_CRIT, "ERROR: tcp_main_loop: read from child:  %s\n",
							strerror(errno));
					/* try to ignore ? */
					continue; /* skip this and try the next one */
				}
				DBG("tcp_main_loop: read response= %lx, %ld from %d (%d)\n",
						response[0], response[1], r, pt[r].pid);
				cmd=response[1];
				tcpconn=(struct tcp_connection*)response[0];
				switch(cmd){
					case CONN_ERROR:
						if (tcpconn){
							if (tcpconn->s!=-1)
								FD_CLR(tcpconn->s, &master_set);
							tcpconn_destroy(tcpconn);
						}
						break;
					case CONN_GET_FD:
						/* send the requested FD  */
						/* WARNING: take care of setting refcnt properly to
						 * avoid race condition */
						if (tcpconn){
							if (send_fd(pt[r].unix_sock, &tcpconn,
										sizeof(tcpconn), tcpconn->s)<=0){
								LOG(L_ERR, "ERROR: tcp_main_loop:"
										"send_fd failed\n");
							}
						}else{
							LOG(L_CRIT, "BUG: tcp_main_loop: null pointer\n");
						}
						break;
					case CONN_NEW:
						/* update the fd in the requested tcpconn*/
						/* WARNING: take care of setting refcnt properly to
						 * avoid race condition */
						if (tcpconn){
							bytes=receive_fd(pt[r].unix_sock, &tcpconn,
									sizeof(tcpconn), &tcpconn->s);
							if (bytes<sizeof(tcpconn)){
								if (bytes<0){
									LOG(L_CRIT, "BUG: tcp_main_loop:"
											" CONN_NEW: receive_fd "
											"failed\n");
								}else{
									LOG(L_CRIT, "BUG: tcp_main_loop:"
											" CONN_NEW: to few bytes "
											"received (%d)\n", bytes );
								}
								break; /* try to ignore */
							}
							/* add tcpconn to the list*/
							tcpconn_add(tcpconn);
							FD_SET(tcpconn->s, &master_set);
							if (maxfd<tcpconn->s) maxfd=tcpconn->s;
							/* update the timeout*/
							tcpconn->timeout=get_ticks()+TCP_CON_TIMEOUT;
						}else{
							LOG(L_CRIT, "BUG: tcp_main_loop: null pointer\n");
						}
						break;
					default:
						LOG(L_CRIT, "BUG: tcp_main_loop: unknown cmd %d\n",
								cmd);
				}
			}
		} /* for */

		/* remove old connections */
		tcpconn_timeout(&master_set);
	}
}
/* Reset the reference point used by the timer: record the current tick
 * count in the global data struct so later elapsed-time reads are measured
 * from "now". (gd is the board/global data pointer provided elsewhere.) */
void reset_timer(void) { gd->timer_reset_value = get_ticks(); }
/**
 * Worker process main loop: bridges SIP messages (received over a pipe from
 * SER) to Jabber connections, and Jabber traffic back to SIP.
 * - jwl      : pointer to the workers list
 * - jaddress : address of the jabber server
 * - jport    : port of the jabber server
 * - rank     : worker's rank (index into jwl->workers)
 * - db_con   : connection to database (used to look up jabber credentials)
 * - dbf      : database module callbacks structure
 * return     : 0 on success or <0 on error
 */
int xj_worker_process(xj_wlist jwl, char* jaddress, int jport, int rank,
		db_con_t* db_con, db_func_t* dbf)
{
	int pipe, ret, i, pos, maxfd, flag;
	xj_jcon_pool jcp;          /* pool of open jabber connections */
	struct timeval tmv;
	fd_set set, mset;          /* set = persistent fds, mset = per-select copy */
	xj_sipmsg jsmsg;           /* job read from the pipe */
	str sto;
	xj_jcon jbc = NULL;
	xj_jconf jcf = NULL;
	char *p, buff[1024], recv_buff[4096];
	int flags, nr, ltime = 0;  /* ltime: last expire-scan time; <0 forces a scan */

	/* DB query: select jab_id, jab_passwd where sip_id=<buff> and type=0 */
	db_key_t keys[] = {"sip_id", "type"};
	db_val_t vals[2];
	db_key_t col[] = {"jab_id", "jab_passwd"};
	db_res_t* res = NULL;

	vals[0].type=DB_STRING; vals[0].nul=0; vals[0].val.string_val=buff;
	vals[1].type=DB_INT;    vals[1].nul=0; vals[1].val.int_val=0;

	_xj_pid = getpid();

	//signal(SIGTERM, xj_sig_handler);
	//signal(SIGINT, xj_sig_handler);
	//signal(SIGQUIT, xj_sig_handler);
	signal(SIGSEGV, xj_sig_handler);

	/* strip an optional "sip:" prefix from the configured gateway name */
	if(registrar)
	{
		jab_gw_name.s = registrar;
		jab_gw_name.len = strlen(registrar);
		if(registrar[0]== 's' && registrar[1]== 'i' && registrar[2]== 'p'
				&& registrar[3]== ':')
		{
			jab_gw_name.s += 4;
			jab_gw_name.len -= 4;
		}
	}

	if(!jwl || !jwl->aliases || !jwl->aliases->jdm || !jaddress
			|| rank >= jwl->len)
	{
		DBG("XJAB:xj_worker[%d]:%d: exiting - wrong parameters\n",
				rank, _xj_pid);
		return -1;
	}

	pipe = jwl->workers[rank].rpipe;

	DBG("XJAB:xj_worker[%d]:%d: started - pipe=<%d> : 1st message delay"
		" <%d>\n", rank, _xj_pid, pipe, jwl->delayt);

	if((jcp=xj_jcon_pool_init(jwl->maxj,XJ_MSG_POOL_SIZE,jwl->delayt))==NULL)
	{
		DBG("XJAB:xj_worker: cannot allocate the pool\n");
		return -1;
	}

	maxfd = pipe;
	tmv.tv_sec = jwl->sleept;
	tmv.tv_usec = 0;

	FD_ZERO(&set);
	FD_SET(pipe, &set);
	while(main_loop)
	{
		mset = set;

		/* shorter select timeout while the scheduled-message queue is busy */
		tmv.tv_sec = (jcp->jmqueue.size == 0)?jwl->sleept:1;
#ifdef XJ_EXTRA_DEBUG
		//DBG("XJAB:xj_worker[%d]:%d: select waiting %ds - queue=%d\n",rank,
		//		_xj_pid, (int)tmv.tv_sec, jcp->jmqueue.size);
#endif
		tmv.tv_usec = 0;

		ret = select(maxfd+1, &mset, NULL, NULL, &tmv);
		// check the msg queue
		xj_worker_check_qmsg(jwl, jcp);
		if(ret <= 0)
			goto step_x;
#ifdef XJ_EXTRA_DEBUG
		DBG("XJAB:xj_worker:%d: something is coming\n", _xj_pid);
#endif
		if(!FD_ISSET(pipe, &mset))
			goto step_y;

		/* NOTE(review): read() returns ssize_t; comparing against the
		 * unsigned sizeof promotes -1 to a huge value, so a read *error*
		 * (as opposed to a short read) would not trigger this branch —
		 * verify against the callers/original sources. */
		if(read(pipe, &jsmsg, sizeof(jsmsg)) < sizeof(jsmsg))
		{
			DBG("XJAB:xj_worker:%d: BROKEN PIPE - exiting\n", _xj_pid);
			break;
		}
#ifdef XJ_EXTRA_DEBUG
		DBG("XJAB:xj_worker:%d: job <%p> from SER\n", _xj_pid, jsmsg);
#endif
		if(jsmsg == NULL || jsmsg->jkey==NULL || jsmsg->jkey->id==NULL)
			goto step_w;

		/* copy the SIP id into buff: it is both the DB query value
		 * (vals[0] points at buff) and used for debug output */
		strncpy(buff, jsmsg->jkey->id->s, jsmsg->jkey->id->len);
		buff[jsmsg->jkey->id->len] = 0;

		jbc = xj_jcon_pool_get(jcp, jsmsg->jkey);

		/* dispatch on job type; cases that fall through to "break" continue
		 * below with connection lookup/creation */
		switch(jsmsg->type)
		{
			case XJ_SEND_MESSAGE:
				if(!xj_jconf_check_addr(&jsmsg->to, jwl->aliases->dlm) &&
				(!jbc||!xj_jcon_get_jconf(jbc,&jsmsg->to,jwl->aliases->dlm)))
				{
					xj_send_sip_msgz(_PADDR(jwl), jsmsg->jkey->id, &jsmsg->to,
							XJ_DMSG_ERR_NOTJCONF, NULL);
					goto step_w;
				}
				break;
			case XJ_REG_WATCHER:
			case XJ_JOIN_JCONF:
			case XJ_GO_ONLINE:
				break;
			case XJ_EXIT_JCONF:
				if(jbc == NULL)
					goto step_w;
				// close the conference session here
				if(jbc->nrjconf <= 0)
					goto step_w;
				if(!xj_jconf_check_addr(&jsmsg->to, jwl->aliases->dlm))
					xj_jcon_del_jconf(jbc, &jsmsg->to, jwl->aliases->dlm,
							XJ_JCMD_UNSUBSCRIBE);
				xj_send_sip_msgz(_PADDR(jwl), jsmsg->jkey->id, &jsmsg->to,
						XJ_DMSG_INF_JCONFEXIT, NULL);
				goto step_w;
			case XJ_GO_OFFLINE:
				/* ltime=-1 forces the expired-connection scan at step_xx */
				if(jbc != NULL)
					jbc->expire = ltime = -1;
				goto step_w;
			case XJ_DEL_WATCHER:
			default:
				goto step_w;
		}

		if(jbc != NULL)
		{
#ifdef XJ_EXTRA_DEBUG
			DBG("XJAB:xj_worker:%d: connection already exists"
				" for <%s> ...\n", _xj_pid, buff);
#endif
			xj_jcon_update(jbc, jwl->cachet);
			goto step_z;
		}

		// NO OPEN CONNECTION FOR THIS SIP ID
#ifdef XJ_EXTRA_DEBUG
		DBG("XJAB:xj_worker:%d: new connection for <%s>.\n",
				_xj_pid, buff);
#endif
		/* look up the jabber account associated with the SIP id */
		if(dbf->query(db_con, keys, 0, vals, col, 2, 2, NULL, &res) != 0 ||
			RES_ROW_N(res) <= 0)
		{
#ifdef XJ_EXTRA_DEBUG
			DBG("XJAB:xj_worker:%d: no database result when looking"
				" for associated Jabber account\n", _xj_pid);
#endif
			xj_send_sip_msgz(_PADDR(jwl), jsmsg->jkey->id, &jsmsg->to,
					XJ_DMSG_ERR_JGWFORB, NULL);
			goto step_v;
		}

		/* NOTE(review): the result of xj_jcon_init() is not NULL-checked
		 * before being passed to xj_jcon_connect() — confirm the callee
		 * tolerates NULL (allocation failure path). */
		jbc = xj_jcon_init(jaddress, jport);

		if(xj_jcon_connect(jbc))
		{
			DBG("XJAB:xj_worker:%d: Cannot connect"
				" to the Jabber server ...\n", _xj_pid);
			xj_send_sip_msgz(_PADDR(jwl), jsmsg->jkey->id, &jsmsg->to,
					XJ_DMSG_ERR_NOJSRV, NULL);
			goto step_v;
		}
#ifdef XJ_EXTRA_DEBUG
		DBG("XJAB:xj_worker: auth to jabber as: [%s] / [xxx]\n",
			(char*)(ROW_VALUES(RES_ROWS(res))[0].val.string_val));
		//	(char*)(ROW_VALUES(RES_ROWS(res))[1].val.string_val));
#endif
		/* authenticate with jab_id / jab_passwd from the DB row */
		if(xj_jcon_user_auth(jbc,
			(char*)(ROW_VALUES(RES_ROWS(res))[0].val.string_val),
			(char*)(ROW_VALUES(RES_ROWS(res))[1].val.string_val),
			XJAB_RESOURCE) < 0)
		{
			DBG("XJAB:xj_worker:%d: Authentication to the Jabber server"
				" failed ...\n", _xj_pid);
			xj_jcon_disconnect(jbc);
			xj_send_sip_msgz(_PADDR(jwl), jsmsg->jkey->id, &jsmsg->to,
					XJ_DMSG_ERR_JAUTH, NULL);
			xj_jcon_free(jbc);
			goto step_v;
		}

		if(xj_jcon_set_attrs(jbc, jsmsg->jkey, jwl->cachet, jwl->delayt)
			|| xj_jcon_pool_add(jcp, jbc))
		{
			DBG("XJAB:xj_worker:%d: Keeping connection to Jabber server"
				" failed! Not enough memory ...\n", _xj_pid);
			xj_jcon_disconnect(jbc);
			xj_send_sip_msgz(_PADDR(jwl), jsmsg->jkey->id, &jsmsg->to,
					XJ_DMSG_ERR_JGWFULL, NULL);
			xj_jcon_free(jbc);
			goto step_v;
		}

		/** add socket descriptor to select */
#ifdef XJ_EXTRA_DEBUG
		DBG("XJAB:xj_worker:%d: add connection on <%d> \n", _xj_pid,
				jbc->sock);
#endif
		if(jbc->sock > maxfd)
			maxfd = jbc->sock;
		FD_SET(jbc->sock, &set);

		xj_jcon_get_roster(jbc);
		xj_jcon_send_presence(jbc, NULL, NULL, "Online", "9");

		/** wait for a while - the worker is tired */
		//sleep(3);

		/* release the DB result now that credentials were consumed */
		if ((res != NULL) && (dbf->free_result(db_con,res) < 0))
		{
			DBG("XJAB:xj_worker:%d:Error while freeing"
				" SQL result - worker terminated\n", _xj_pid);
			return -1;
		}
		else
			res = NULL;

step_z:	/* connection (new or cached) is available in jbc */
		if(jsmsg->type == XJ_GO_ONLINE)
			goto step_w;

		if(jsmsg->type == XJ_REG_WATCHER)
		{
			// update or register a presence watcher
			xj_worker_check_watcher(jwl, jcp, jbc, jsmsg);
			goto step_w;
		}

		flag = 0;
		/* conference-addressed destination: locate/join the conference */
		if(!xj_jconf_check_addr(&jsmsg->to, jwl->aliases->dlm))
		{
			if((jcf = xj_jcon_get_jconf(jbc, &jsmsg->to, jwl->aliases->dlm))
					!= NULL)
			{
				if((jsmsg->type == XJ_JOIN_JCONF) &&
					!(jcf->status & XJ_JCONF_READY ||
						jcf->status & XJ_JCONF_WAITING))
				{
					if(!xj_jcon_jconf_presence(jbc,jcf,NULL,"online"))
						jcf->status = XJ_JCONF_WAITING;
					else
					{
						// unable to join the conference
						// --- send back to SIP user a msg
						xj_send_sip_msgz(_PADDR(jwl),jsmsg->jkey->id,
								&jsmsg->to, XJ_DMSG_ERR_JOINJCONF,
								&jbc->jkey->flag);
						goto step_w;
					}
				}
				flag |= XJ_ADDRTR_CON;
			}
			else
			{
				// unable to get the conference
				// --- send back to SIP user a msg
				xj_send_sip_msgz(_PADDR(jwl), jsmsg->jkey->id, &jsmsg->to,
						XJ_DMSG_ERR_NEWJCONF, &jbc->jkey->flag);
				goto step_w;
			}
		}

		if(jsmsg->type != XJ_SEND_MESSAGE)
			goto step_w;

		// here will come only XJ_SEND_MESSAGE
		switch(xj_jcon_is_ready(jbc,jsmsg->to.s,jsmsg->to.len,
					jwl->aliases->dlm))
		{
			case 0: /* ready: translate address and send now */
#ifdef XJ_EXTRA_DEBUG
				DBG("XJAB:xj_worker:%d: SENDING the message to Jabber"
					" network ...\n", _xj_pid);
#endif
				/*** address correction ***/
				sto.s = buff;
				sto.len = 0;
				flag |= XJ_ADDRTR_S2J;
				if(xj_address_translation(&jsmsg->to, &sto, jwl->aliases,
							flag) == 0)
				{
					if(xj_jcon_send_msg(jbc, sto.s, sto.len,
							jsmsg->msg.s, jsmsg->msg.len,
							(flag&XJ_ADDRTR_CON)?XJ_JMSG_GROUPCHAT
								:XJ_JMSG_CHAT)<0)
						xj_send_sip_msgz(_PADDR(jwl),jsmsg->jkey->id,
								&jsmsg->to, XJ_DMSG_ERR_SENDJMSG,
								&jbc->jkey->flag);
				}
				else
					DBG("XJAB:xj_worker:%d: ERROR SENDING as Jabber"
						" message ...\n", _xj_pid);
				goto step_w;
			case 1: /* not ready yet: queue the message for later delivery */
#ifdef XJ_EXTRA_DEBUG
				DBG("XJAB:xj_worker:%d:SCHEDULING the message.\n", _xj_pid);
#endif
				if(xj_jcon_pool_add_jmsg(jcp, jsmsg, jbc) < 0)
				{
					DBG("XJAB:xj_worker:%d: SCHEDULING the message FAILED."
						" Message was dropped.\n",_xj_pid);
					xj_send_sip_msgz(_PADDR(jwl), jsmsg->jkey->id,
							&jsmsg->to, XJ_DMSG_ERR_STOREJMSG,
							&jbc->jkey->flag);
					goto step_w;
				}
				else // skip freeing the SIP message - now is in queue
					goto step_y;
			case 2:
				xj_send_sip_msgz(_PADDR(jwl), jsmsg->jkey->id, &jsmsg->to,
						XJ_DMSG_ERR_NOREGIM, &jbc->jkey->flag);
				goto step_w;
			case 3: // not joined to Jabber conference
				xj_send_sip_msgz(_PADDR(jwl), jsmsg->jkey->id, &jsmsg->to,
						XJ_DMSG_ERR_NOTJCONF, &jbc->jkey->flag);
				goto step_w;
			default:
				xj_send_sip_msgz(_PADDR(jwl), jsmsg->jkey->id, &jsmsg->to,
						XJ_DMSG_ERR_SENDJMSG, &jbc->jkey->flag);
				goto step_w;
		}

step_v: // error connecting to Jabber server
		// cleaning jab_wlist
		xj_wlist_del(jwl, jsmsg->jkey, _xj_pid);

		// cleaning db_query
		if ((res != NULL) && (dbf->free_result(db_con,res) < 0))
		{
			DBG("XJAB:xj_worker:%d:Error while freeing"
				" SQL result - worker terminated\n", _xj_pid);
			return -1;
		}
		else
			res = NULL;

step_w:	/* normal per-job cleanup: release the SIP message */
		if(jsmsg!=NULL)
		{
			xj_sipmsg_free(jsmsg);
			jsmsg = NULL;
		}

step_y:
		// check for new message from ... JABBER
		for(i = 0; i < jcp->len && main_loop; i++)
		{
			if(jcp->ojc[i] == NULL)
				continue;
#ifdef XJ_EXTRA_DEBUG
			DBG("XJAB:xj_worker:%d: checking socket <%d>"
				" ...\n", _xj_pid, jcp->ojc[i]->sock);
#endif
			if(!FD_ISSET(jcp->ojc[i]->sock, &mset))
				continue;
			pos = nr = 0;
			do
			{
				p = recv_buff;
				/* on re-entry: move the unconsumed tail [pos..nr) to the
				 * front of recv_buff, then continue reading after it */
				if(pos != 0)
				{
					while(pos < nr)
					{
						*p = recv_buff[pos];
						pos++;
						p++;
					}
					*p = 0;
					/**
					 * flush out the socket - set it to nonblocking
					 */
					flags = fcntl(jcp->ojc[i]->sock, F_GETFL, 0);
					if(flags!=-1 && !(flags & O_NONBLOCK))
					{
						/* set NONBLOCK bit to enable non-blocking */
						fcntl(jcp->ojc[i]->sock, F_SETFL, flags|O_NONBLOCK);
					}
				}
				if((nr = read(jcp->ojc[i]->sock, p,
						sizeof(recv_buff)-(p-recv_buff))) == 0
					||(nr < 0 && errno != EAGAIN))
				{
					DBG("XJAB:xj_worker:%d: ERROR -"
						" connection to jabber lost on socket <%d> ...\n",
						_xj_pid, jcp->ojc[i]->sock);
					/* NOTE(review): &jbc->jkey->flag here uses jbc left over
					 * from the SIP-message path, not jcp->ojc[i] — looks
					 * like it should refer to the connection that died;
					 * verify against upstream sources. */
					xj_send_sip_msgz(_PADDR(jwl), jcp->ojc[i]->jkey->id,
						&jab_gw_name,XJ_DMSG_ERR_DISCONNECTED,
						&jbc->jkey->flag);
					// make sure that will ckeck expired connections
					ltime = jcp->ojc[i]->expire = -1;
					FD_CLR(jcp->ojc[i]->sock, &set);
					goto step_xx;
				}
#ifdef XJ_EXTRA_DEBUG
				DBG("XJAB:xj_worker:%d: received: %dbytes Err:%d/EA:%d\n",
						_xj_pid, nr, errno, EAGAIN);
#endif
				xj_jcon_update(jcp->ojc[i], jwl->cachet);

				if(nr>0)
					p[nr] = 0;
				nr = strlen(recv_buff);
				pos = 0;
#ifdef XJ_EXTRA_DEBUG
				DBG("XJAB:xj_worker: JMSG START ----------\n%.*s\n"
					" JABBER: JMSGL:%d END ----------\n", nr, recv_buff, nr);
#endif
			/* xj_manage_jab()==9 means "partial stanza, need more data" —
			 * presumably; confirm against its definition */
			} while(xj_manage_jab(recv_buff, nr, &pos,
						jwl->aliases, jcp->ojc[i]) == 9
					&& main_loop);

			/**
			 * flush out the socket - set it back to blocking
			 */
			flags = fcntl(jcp->ojc[i]->sock, F_GETFL, 0);
			if(flags!=-1 && (flags & O_NONBLOCK))
			{
				/* reset NONBLOCK bit to enable blocking */
				fcntl(jcp->ojc[i]->sock, F_SETFL, flags & ~O_NONBLOCK);
			}

#ifdef XJ_EXTRA_DEBUG
			DBG("XJAB:xj_worker:%d: msgs from socket <%d> parsed"
				" ...\n", _xj_pid, jcp->ojc[i]->sock);
#endif
		} // end FOR(i = 0; i < jcp->len; i++)

step_x:	/* select() returned <=0: rebuild the fd set on signal/error */
		if(ret < 0)
		{
			DBG("XJAB:xj_worker:%d: SIGNAL received!!!!!!!!\n", _xj_pid);
			maxfd = pipe;
			FD_ZERO(&set);
			FD_SET(pipe, &set);
			for(i = 0; i < jcp->len; i++)
			{
				if(jcp->ojc[i] != NULL)
				{
					FD_SET(jcp->ojc[i]->sock, &set);
					if( jcp->ojc[i]->sock > maxfd )
						maxfd = jcp->ojc[i]->sock;
				}
			}
		}

step_xx: /* periodic (or forced, when ltime<0) expired-connection scan */
		if(ltime < 0 || ltime + jwl->sleept <= get_ticks())
		{
			ltime = get_ticks();
#ifdef XJ_EXTRA_DEBUG
			//DBG("XJAB:xj_worker:%d: scanning for expired connection\n",
			//		_xj_pid);
#endif
			xj_worker_check_jcons(jwl, jcp, ltime, &set);
		}
	} // END while

	DBG("XJAB:xj_worker:%d: cleaning procedure\n", _xj_pid);

	return 0;
} // end xj_worker_process
/**
 * Winnow: select n points from the cells of `matrix` whose corresponding
 * `mask` entry is set. Masked cells become weighted points (weight = matrix
 * value, coordinates = column/row); these are sorted and every `stride`-th
 * point, counted from the heaviest end, is copied into `points`.
 * Aborts via not_enough_points() when fewer masked cells than n exist,
 * and via out_of_memory() when the temporary vector cannot be allocated.
 */
void CowichanSerial::winnow(IntMatrix matrix, BoolMatrix mask,
  PointVector points)
{
  // count the masked cells; this is how many candidate points exist
  index_t numMarked = mask_count (mask, nr, nc);

  if (numMarked < n) {
    not_enough_points();
  }

  WeightedPointVector candidates = NULL;
  try {
    candidates = NEW_VECTOR_SZ(WeightedPoint, numMarked);
  } catch (...) {out_of_memory();}

  // build the candidate list in row-major scan order
  index_t filled = 0;
  for (index_t row = 0; row < nr; row++) {
    for (index_t col = 0; col < nc; col++) {
      if (MATRIX_RECT(mask, row, col)) {
        candidates[filled++] = WeightedPoint((real)col, (real)row,
            MATRIX_RECT(matrix, row, col));
      }
    }
  }

#ifdef SORT_TIME
  INT64 start, end;
  start = get_ticks ();
#endif

  // order candidates by weight
  std::sort(candidates, &candidates[numMarked]);

#ifdef SORT_TIME
  end = get_ticks ();
#endif

  // walk backwards from the heaviest candidate, stepping by stride,
  // filling the output vector from its last slot down to slot 0
  index_t stride = numMarked / n;
  index_t src = numMarked - 1;
  for (index_t dst = n - 1; dst >= 0; dst--, src -= stride) {
#ifdef WINNOW_OUTPUT
    std::cout << candidates[src].weight << "\n";
#endif
    points[dst] = candidates[src].point;
  }

#ifdef SORT_TIME
  std::cout << "winnow sort: ";
  print_elapsed_time(start, end);
  std::cout << std::endl;
#endif

  delete [] candidates;
}
// Remaining time on this timer: the configured period minus the ticks
// elapsed since the recorded start time (may go negative once expired).
int Timer::get_left()
{
	int elapsed = get_ticks() - time;
	return period - elapsed;
}
/* Blocking write even on non-blocking sockets: sends `len` bytes from `buf`
 * on `fd`, looping over partial writes and waiting for writability with
 * select(). Returns the original `len` on full success, -1 on error or when
 * the tcp_send_timeout deadline is hit.
 * (`c` is accepted but unused in the visible body.) */
static int tcp_blocking_write(struct tcp_connection* c, int fd, char* buf,
		unsigned int len)
{
	int n;
	fd_set sel_set;
	struct timeval timeout;
	int ticks;
	int initial_len;

	initial_len=len;
again:
	/* MSG_NOSIGNAL (where available) suppresses SIGPIPE on a closed peer */
	n=send(fd, buf, len,
#ifdef HAVE_MSG_NOSIGNAL
			MSG_NOSIGNAL
#else
			0
#endif
		);
	if (n<0){
		if (errno==EINTR) goto again;
		else if (errno!=EAGAIN && errno!=EWOULDBLOCK){
			LOG(L_ERR, "tcp_blocking_write: failed to send: (%d) %s\n",
					errno, strerror(errno));
			goto error;
		}
		/* EAGAIN/EWOULDBLOCK: fall through to the select() wait below */
	}else if (n<len){
		/* partial write: advance the window and wait for writability */
		buf+=n;
		len-=n;
	}else{
		/* success: full write */
		goto end;
	}
	while(1){
		FD_ZERO(&sel_set);
		FD_SET(fd, &sel_set);
		timeout.tv_sec=tcp_send_timeout;
		timeout.tv_usec=0;
		/* ticks is re-read each iteration, so the timeout below measures
		 * only the span of this one select() call */
		ticks=get_ticks();
		n=select(fd+1, 0, &sel_set, 0, &timeout);
		if (n<0){
			if (errno==EINTR) continue; /* signal, ignore */
			LOG(L_ERR, "ERROR: tcp_blocking_write: select failed: "
					" (%d) %s\n", errno, strerror(errno));
			goto error;
		}else if (n==0){
			/* select timed out: give up once the full send timeout passed */
			if (get_ticks()-ticks>=tcp_send_timeout){
				LOG(L_ERR, "ERROR: tcp_blocking_write: send timeout (%d)\n",
						tcp_send_timeout);
				goto error;
			}
			continue;
		}
		if (FD_ISSET(fd, &sel_set)){
			/* we can write again */
			goto again;
		}
	}
error:
	return -1;
end:
	return initial_len;
}
/* Script-level check for a request's source IP against the pike tree.
 * Marks the IP with one more hit, maintains the node's timer-list
 * membership, and returns:
 *   1  - IP not flagged (or internal error, deliberately treated as OK)
 *  -1  - IP already marked RED (blocked)
 *  -2  - IP newly turned RED (blocking starts now; event raised) */
int pike_check_req(struct sip_msg *msg)
{
	struct ip_node *node;
	struct ip_node *father;
	unsigned char flags;
	struct ip_addr* ip;

#ifdef _test
	/* get the ip address from second via */
	if (parse_headers(msg, HDR_VIA1_F, 0)!=0 )
		return -1;
	if (msg->via1==0 )
		return -1;
	/* convert from string to ip_addr */
	ip = str2ip( &msg->via1->host );
	if (ip==0)
		return -1;
#else
	ip = &(msg->rcv.src_ip);
#endif

	/* first lock the proper tree branch and mark the IP with one more hit*/
	lock_tree_branch( ip->u.addr[0] );
	node = mark_node( ip->u.addr, ip->len, &father, &flags);
	if (node==0) {
		unlock_tree_branch( ip->u.addr[0] );
		/* even if this is an error case, we return true in script to avoid
		 * considering the IP as marked (bogdan) */
		return 1;
	}

	LM_DBG("src IP [%s],node=%p; hits=[%d,%d],[%d,%d] node_flags=%d"
		" func_flags=%d\n", ip_addr2a( ip ), node,
		node->hits[PREV_POS],node->hits[CURR_POS],
		node->leaf_hits[PREV_POS],node->leaf_hits[CURR_POS],
		node->flags, flags);

	/* update the timer */
	lock_get(timer_lock);
	if ( flags&NEW_NODE ) {
		/* put this node into the timer list and remove its
		 * father only if this has one kid and is not a LEAF_NODE */
		node->expires = get_ticks() + timeout;
		append_to_timer( timer, &(node->timer_ll) );
		node->flags |= NODE_INTIMER_FLAG;
		if (father) {
			LM_DBG("father %p: flags=%d kids->next=%p\n",
				father,father->flags,father->kids->next);
			if (!(father->flags&NODE_IPLEAF_FLAG) && !father->kids->next){
				/* debug: a non-IP-leaf father with a single kid must be
				 * on the timer list (or already expired) */
				assert( has_timer_set(&(father->timer_ll)) &&
					(father->flags&(NODE_EXPIRED_FLAG|NODE_INTIMER_FLAG)) );
				/* if the node is maked as expired by timer, let the timer
				 * to finish and remove the node */
				if ( !(father->flags&NODE_EXPIRED_FLAG) ) {
					remove_from_timer( timer, &(father->timer_ll) );
					father->flags &= ~NODE_INTIMER_FLAG;
				} else {
					father->flags &= ~NODE_EXPIRED_FLAG;
				}
			}
		}
	} else {
		/* update the timer -> in timer can be only nodes
		 * as IP-leaf(complete address) or tree-leaf */
		if (node->flags&NODE_IPLEAF_FLAG || node->kids==0) {
			/* tree leafs which are not potential red nodes are not update in
			 * order to make them to expire */
			/* debug */
			assert( has_timer_set(&(node->timer_ll)) &&
				(node->flags&(NODE_EXPIRED_FLAG|NODE_INTIMER_FLAG)) );
			/* if node exprired, ignore the current hit and let is
			 * expire in timer process */
			if ( !(flags&NO_UPDATE) && !(node->flags&NODE_EXPIRED_FLAG) ) {
				node->expires = get_ticks() + timeout;
				update_in_timer( timer, &(node->timer_ll) );
			}
		} else {
			/* debug: interior nodes with kids must not be on the timer */
			assert( !has_timer_set(&(node->timer_ll)) &&
				!(node->flags&(NODE_INTIMER_FLAG|NODE_EXPIRED_FLAG)) );
			/* debug */
			assert( !(node->flags&NODE_IPLEAF_FLAG) && node->kids );
		}
	}
	/*print_timer_list( timer );*/ /* debug*/
	lock_release(timer_lock);
	unlock_tree_branch( ip->u.addr[0] );
	/*print_tree( 0 );*/ /* debug */

	if (flags&RED_NODE) {
		if (flags&NEWRED_NODE) {
			LM_GEN1( pike_log_level,
				"PIKE - BLOCKing ip %s, node=%p\n",ip_addr2a(ip),node);
			pike_raise_event(ip_addr2a(ip));
			return -2;
		}
		return -1;
	}
	return 1;
}
/* Built-in: wrap the current tick count in a float term allocated on the
 * process heap and unify it with the first argument. */
retCode g__ticks(processPo p, ptrPo a)
{
	ptrI ticksTerm = allocateFloat(&p->proc.heap, get_ticks());
	return equal(p, &ticksTerm, &a[1]);
}
/* Busy-wait for roughly `milli_sec` milliseconds by polling the tick
 * counter (ticks scaled by HZ to milliseconds).
 * TODO: this delay will block interrupts (spins on the CPU). */
void delay(int milli_sec)
{
	int start = get_ticks();

	while (((get_ticks() - start) * 1000 / HZ) < milli_sec)
		; /* spin */
}
ulong get_timer_masked(void) { unsigned long long res = get_ticks(); return res; }
/* Frame pacing / auto-frameskip. Called once per emulated frame.
 * Returns 1 when the caller should skip rendering this frame, 0 otherwise.
 * Keeps its pacing state in statics; re-initialized when the global
 * init_frame_skip flag is set. (The `init` parameter is unused in the
 * visible body — state reset is driven by init_frame_skip instead.) */
int frame_skip(int init)
{
	static int f2skip;               /* frames still to skip */
	static uclock_t sec = 0;         /* start of the current FPS window */
	static uclock_t rfd;             /* tick reading for this frame */
	static uclock_t target;          /* tick deadline for this frame */
	static int nbFrame = 0;          /* frames rendered this second */
	static unsigned int nbFrame_moy = 0; /* frames since bench start (for avg) */
	static int nbFrame_min = 1000;   /* bench: worst FPS seen */
	static int nbFrame_max = 0;      /* bench: best FPS seen */
	static int skpFrm = 0;           /* consecutive skipped frames */
	static int count = 0;            /* bench: seconds in current averaging run */
	static int moy=60;               /* bench: average FPS */

	if (init_frame_skip) {
		/* (re)start pacing from "now" */
		init_frame_skip = 0;
		target = get_ticks();
		bench = (CF_BOOL(cf_get_item_by_name("bench")) ? 1/*get_ticks()*/ : 0);
		nbFrame = 0;
		//f2skip=0;
		//skpFrm=0;
		sec = 0;
		return 0;
	}

	target += F; /* F = ticks per frame (defined elsewhere) */

	if (f2skip > 0 ) {
		f2skip--;
		skpFrm++;
		return 1; /* still catching up: skip this frame */
	} else
		skpFrm = 0;

	// printf("%d %d\n",conf.autoframeskip,conf.show_fps);
	rfd = get_ticks();
	if (gTurboMode) {
		/* turbo: never wait, always skip the maximum */
		target=get_ticks();
		f2skip=MAX_FRAMESKIP;
	} else {
		if (conf.autoframeskip) {
			if (rfd < target && f2skip == 0) {
				/* ahead of schedule: busy-wait (optionally sleeping)
				 * until the frame deadline */
				while (get_ticks() < target) {
#ifndef WIN32
					if (conf.sleep_idle) {
						usleep(5);
					}
#endif
				}
			} else {
				/* behind schedule: skip as many frames as we are late by */
				f2skip = (rfd - target) / (double) F;
				if (f2skip > MAX_FRAMESKIP) {
					f2skip = MAX_FRAMESKIP;
					reset_frame_skip();
				}
				// printf("Skip %d frame(s) %lu %lu\n",f2skip,target,rfd);
			}
		}
	}
	nbFrame++;
	nbFrame_moy++;
	if (conf.show_fps) {
		/* once per second, refresh the on-screen FPS string */
		if (get_ticks() - sec >= TICKS_PER_SEC) {
			//printf("%d\n",nbFrame);
			if (bench) {
				if (nbFrame_min>nbFrame) nbFrame_min=nbFrame;
				if (nbFrame_max<nbFrame) nbFrame_max=nbFrame;
				count++;
				moy=nbFrame_moy/(float)count;
				if (count==30) count=0;
				sprintf(fps_str, "%d %d %d %d\n",
					nbFrame-1,nbFrame_min-1,nbFrame_max-1,moy-1);
			} else {
				sprintf(fps_str, "%2d", nbFrame-1);
			}
			nbFrame = 0;
			sec = get_ticks();
		}
	}
	return 0;
}