static bool IO_Serial_WaitToWrite(struct s_reader *reader, uint32_t delay_us, uint32_t timeout_us) { struct pollfd ufds; struct timeb start, end; int32_t ret_val; int32_t out_fd; int64_t polltimeout = timeout_us / 1000; #if !defined(WITH_COOLAPI) if(reader->typ == R_INTERNAL) { return OK; } // needed for internal readers, otherwise error! #endif if(delay_us > 0) { cs_sleepus(delay_us); } // wait in us out_fd = reader->handle; ufds.fd = out_fd; ufds.events = POLLOUT; ufds.revents = 0x0000; cs_ftime(&start); // register start time while(1) { ret_val = poll(&ufds, 1, polltimeout); cs_ftime(&end); // register end time switch(ret_val) { case 0: rdr_log(reader, "ERROR: not ready to write, timeout=%"PRId64" ms", comp_timeb(&end, &start)); return ERROR; case -1: if(errno == EINTR || errno == EAGAIN) { cs_sleepus(1); if(timeout_us > 0) { polltimeout = (timeout_us / 1000) - comp_timeb(&end, &start); if(polltimeout < 0) { polltimeout = 0; } } continue; } rdr_log(reader, "ERROR: %s: timeout=%"PRId64" ms (errno=%d %s)", __func__, comp_timeb(&end, &start), errno, strerror(errno)); return ERROR; default: if(((ufds.revents) & POLLOUT) == POLLOUT) { return OK; } else { return ERROR; } } } }
void cardreader_process_ecm(struct s_reader *reader, struct s_client *cl, ECM_REQUEST *er) { cs_log_dump_dbg(D_ATR, er->ecm, er->ecmlen, "ecm:"); struct timeb tps, tpe; struct s_ecm_answer ea; memset(&ea, 0, sizeof(struct s_ecm_answer)); cs_ftime(&tps); int32_t rc = cardreader_do_ecm(reader, er, &ea); cs_ftime(&tpe); rdr_log_dbg(reader, D_READER, "%s: cardreader_do_ecm returned rc=%d (ERROR=%d)", __func__, rc, ERROR); ea.rc = E_FOUND; //default assume found ea.rcEx = 0; //no special flag if(rc == ERROR) { char buf[CS_SERVICENAME_SIZE]; rdr_log_dbg(reader, D_READER, "Error processing ecm for caid %04X, provid %06X, srvid %04X, servicename: %s", er->caid, er->prid, er->srvid, get_servicename(cl, er->srvid, er->prid, er->caid, buf, sizeof(buf))); ea.rc = E_NOTFOUND; ea.rcEx = 0; ICC_Async_DisplayMsg(reader, "Eer"); } if(rc == E_CORRUPT) { char buf[CS_SERVICENAME_SIZE]; rdr_log_dbg(reader, D_READER, "Error processing ecm for caid %04X, provid %06X, srvid %04X, servicename: %s", er->caid, er->prid, er->srvid, get_servicename(cl, er->srvid, er->prid, er->caid, buf, sizeof(buf))); ea.rc = E_NOTFOUND; ea.rcEx = E2_WRONG_CHKSUM; //flag it as wrong checksum memcpy(ea.msglog, "Invalid ecm type for card", 25); } write_ecm_answer(reader, er, ea.rc, ea.rcEx, ea.cw, ea.msglog, ea.tier, &ea.cw_ex); cl->lastecm = time((time_t *)0); char ecmd5[17 * 3]; cs_hexdump(0, er->ecmd5, 16, ecmd5, sizeof(ecmd5)); rdr_log_dbg(reader, D_READER, "ecm hash: %s real time: %"PRId64" ms", ecmd5, comp_timeb(&tpe, &tps)); reader_post_process(reader); }
void cardreader_process_ecm(struct s_reader *reader, struct s_client *cl, ECM_REQUEST *er) { if (ecm_ratelimit_check(reader, er, 1) != OK) { rdr_debug_mask(reader, D_READER, "%s: ratelimit check failed.", __func__); return; // reader_mode = 1: checkout ratelimiter in reader mode so srvid can be replaced } cs_ddump_mask(D_ATR, er->ecm, er->ecmlen, "ecm:"); struct timeb tps, tpe; cs_ftime(&tps); struct s_ecm_answer ea; memset(&ea, 0, sizeof(struct s_ecm_answer)); int32_t rc = cardreader_do_ecm(reader, er, &ea); rdr_debug_mask(reader, D_READER, "%s: cardreader_do_ecm returned rc=%d (ERROR=%d)", __func__, rc, ERROR); ea.rc = E_FOUND; //default assume found ea.rcEx = 0; //no special flag if (rc == ERROR) { char buf[32]; rdr_debug_mask(reader, D_READER, "Error processing ecm for caid %04X, srvid %04X, servicename: %s", er->caid, er->srvid, get_servicename(cl, er->srvid, er->caid, buf)); ea.rc = E_NOTFOUND; ea.rcEx = 0; ICC_Async_DisplayMsg(reader, "Eer"); } if (rc == E_CORRUPT) { char buf[32]; rdr_debug_mask(reader, D_READER, "Error processing ecm for caid %04X, srvid %04X, servicename: %s", er->caid, er->srvid, get_servicename(cl, er->srvid, er->caid, buf)); ea.rc = E_NOTFOUND; ea.rcEx = E2_WRONG_CHKSUM; //flag it as wrong checksum memcpy (ea.msglog,"Invalid ecm type for card",25); } cs_ftime(&tpe); cl->lastecm=time((time_t*)0); char ecmd5[17*3]; cs_hexdump(0, er->ecmd5, 16, ecmd5, sizeof(ecmd5)); rdr_debug_mask(reader, D_READER, "ecm hash: %s real time: %ld ms", ecmd5, 1000 * (tpe.time - tps.time) + tpe.millitm - tps.millitm); write_ecm_answer(reader, er, ea.rc, ea.rcEx, ea.cw, ea.msglog); reader_post_process(reader); }
void azbox_openxcas_ecm_callback(int32_t stream_id, uint32_t UNUSED(seq), int32_t cipher_index, uint32_t UNUSED(caid), unsigned char *ecm_data, int32_t l, uint16_t pid) { cs_debug_mask(D_DVBAPI, LOG_PREFIX "ecm callback received"); openxcas_stream_id = stream_id; //openxcas_seq = seq; //openxcas_caid = caid; openxcas_ecm_pid = pid; openxcas_busy = 1; ECM_REQUEST *er; if (!(er=get_ecmtask())) return; er->srvid = openxcas_sid; er->caid = openxcas_caid; er->pid = openxcas_ecm_pid; er->prid = openxcas_provid; er->ecmlen = l; memcpy(er->ecm, ecm_data, er->ecmlen); request_cw(dvbapi_client, er, 0, 0); //openxcas_stop_filter(openxcas_stream_id, OPENXCAS_FILTER_ECM); //openxcas_remove_filter(openxcas_stream_id, OPENXCAS_FILTER_ECM); openxcas_cipher_idx = cipher_index; struct timeb tp; cs_ftime(&tp); tp.time+=500; }
static int32_t cacheex_check_hitcache(ECM_REQUEST *er, struct s_client *cl) { CACHE_HIT *result; HIT_KEY search; memset(&search, 0, sizeof(HIT_KEY)); search.caid = er->caid; search.prid = er->prid; search.srvid = er->srvid; SAFE_RWLOCK_RDLOCK(&hitcache_lock); result = find_hash_table(&ht_hitcache, &search, sizeof(HIT_KEY), &cacheex_compare_hitkey); if(result){ struct timeb now; cs_ftime(&now); int64_t gone = comp_timeb(&now, &result->time); uint64_t grp = cl?cl->grp:0; if( gone <= (cfg.max_hitcache_time*1000) && (!grp || !result->grp || (grp & result->grp)) ) { SAFE_RWLOCK_UNLOCK(&hitcache_lock); return 1; } } SAFE_RWLOCK_UNLOCK(&hitcache_lock); return 0; }
int64_t add_ms_to_timeb_diff(struct timeb *tb, int32_t ms)
{
	struct timeb tb_now;
	add_ms_to_timeb(tb, ms);
	cs_ftime(&tb_now);
	return comp_timeb(tb, &tb_now);
}
void cacheex_cleanup_hitcache(bool force)
{
	CACHE_HIT *cachehit;
	node *i, *i_next;
	struct timeb now;
	int64_t gone;
	int32_t timeout = (cfg.max_hitcache_time + (cfg.max_hitcache_time / 2)) * 1000; // 1.5 * max_hitcache_time

	SAFE_RWLOCK_WRLOCK(&hitcache_lock);

	i = get_first_node_list(&ll_hitcache);
	while(i)
	{
		i_next = i->next;
		cachehit = get_data_from_node(i);
		if(!cachehit)
		{
			i = i_next;
			continue;
		}

		cs_ftime(&now);
		gone = comp_timeb(&now, &cachehit->time);
		if(force || gone > timeout)
		{
			remove_elem_list(&ll_hitcache, &cachehit->ll_node);
			remove_elem_hash_table(&ht_hitcache, &cachehit->ht_node);
			NULLFREE(cachehit);
		}
		i = i_next;
	}
	SAFE_RWLOCK_UNLOCK(&hitcache_lock);
}
void cleanup_hitcache(void)
{
	CACHE_HIT *cachehit;
	node *i, *i_next;
	struct timeb now;
	int32_t gone;
	int32_t timeout = (cfg.max_hitcache_time + (cfg.max_hitcache_time / 2)) * 1000; // 1.5 * max_hitcache_time

	pthread_rwlock_wrlock(&hitcache_lock);

	i = get_first_node_list(&ll_hitcache);
	while(i)
	{
		i_next = i->next;
		cachehit = get_data_from_node(i);
		if(!cachehit) // guard against empty nodes before dereferencing
		{
			i = i_next;
			continue;
		}

		cs_ftime(&now);
		gone = comp_timeb(&now, &cachehit->time);
		if(gone > timeout)
		{
			remove_elem_list(&ll_hitcache, &cachehit->ll_node);
			remove_elem_hash_table(&ht_hitcache, &cachehit->ht_node);
			NULLFREE(cachehit);
		}
		i = i_next;
	}
	pthread_rwlock_unlock(&hitcache_lock);
}
static void cacheex_add_hitcache(struct s_client *cl, ECM_REQUEST *er) { if (!cfg.max_hitcache_time) // we don't want check/save hitcache return; if (!cfg.cacheex_wait_timetab.cevnum) return; uint32_t cacheex_wait_time = get_cacheex_wait_time(er,NULL); if (!cacheex_wait_time) return; CACHE_HIT *result; HIT_KEY search; memset(&search, 0, sizeof(HIT_KEY)); search.caid = er->caid; search.prid = er->prid; search.srvid = er->srvid; SAFE_RWLOCK_WRLOCK(&hitcache_lock); result = find_hash_table(&ht_hitcache, &search, sizeof(HIT_KEY), &cacheex_compare_hitkey); if(!result) // not found, add it! { if(cs_malloc(&result, sizeof(CACHE_HIT))) { memset(result, 0, sizeof(CACHE_HIT)); result->key.caid = er->caid; result->key.prid = er->prid; result->key.srvid = er->srvid; cs_ftime(&result->max_hitcache_time); add_hash_table(&ht_hitcache, &result->ht_node, &ll_hitcache, &result->ll_node, result, &result->key, sizeof(HIT_KEY)); } } if(result) { if(cl) { result->grp |= cl->grp; result->grp_last_max_hitcache_time |= cl->grp; } cs_ftime(&result->time); //always update time; } SAFE_RWLOCK_UNLOCK(&hitcache_lock); }
static int32_t oscam_ser_send(struct s_client *client, const uchar * const buf, int32_t l) { int32_t n; struct s_serial_client *serialdata=client->serialdata ; if (!client->pfd) return(0); cs_ftime(&serialdata->tps); serialdata->tpe=client->serialdata->tps; serialdata->tpe.millitm+=serialdata->oscam_ser_timeout+(l*(serialdata->oscam_ser_delay+1)); serialdata->tpe.time+=(serialdata->tpe.millitm/1000); serialdata->tpe.millitm%=1000; n=oscam_ser_write(client, buf, l); cs_ftime(&serialdata->tpe); cs_ddump_mask(D_CLIENT, buf, l, "send %d of %d bytes to %s in %ld msec", n, l, remote_txt(), 1000*(serialdata->tpe.time-serialdata->tps.time)+serialdata->tpe.millitm-serialdata->tps.millitm); if (n!=l) cs_log("transmit error. send %d of %d bytes only !", n, l); return(n); }
bool IO_Serial_WaitToRead(struct s_reader *reader, uint32_t delay_us, uint32_t timeout_us) { struct pollfd ufds; struct timeb start, end; int32_t ret_val; int32_t in_fd; int64_t polltimeout = timeout_us / 1000; if(delay_us > 0) { cs_sleepus(delay_us); } // wait in us in_fd = reader->handle; ufds.fd = in_fd; ufds.events = POLLIN | POLLPRI; ufds.revents = 0x0000; cs_ftime(&start); // register start time while(1) { ret_val = poll(&ufds, 1, polltimeout); cs_ftime(&end); // register end time switch(ret_val) { case -1: if(errno == EINTR || errno == EAGAIN) { cs_sleepus(1); if(timeout_us > 0) { polltimeout = (timeout_us / 1000) - comp_timeb(&end, &start); if(polltimeout < 0) { polltimeout = 0; } } continue; } rdr_log(reader, "ERROR: %s: timeout=%"PRId64" ms (errno=%d %s)", __func__, comp_timeb(&end, &start), errno, strerror(errno)); return ERROR; default: if(ufds.revents & (POLLIN | POLLPRI)) { return OK; } else { return ERROR; } } } }
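/*
 * Usage sketch (assumption, not from the original source): a typical caller
 * first waits for readability with IO_Serial_WaitToRead() and only then
 * read()s from reader->handle. The wrapper name, buffer handling and the
 * 1 second timeout are illustrative only.
 */
static int32_t serial_read_byte_example(struct s_reader *reader, unsigned char *out)
{
	// wait up to 1 s for incoming data, no extra pre-delay
	if(IO_Serial_WaitToRead(reader, 0, 1000000) != OK)
	{
		return ERROR; // timeout or poll error
	}

	// descriptor is ready, fetch a single byte (plain read(2) is assumed here)
	if(read(reader->handle, out, 1) != 1)
	{
		return ERROR;
	}
	return OK;
}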
int32_t reader_do_emm(struct s_reader * reader, EMM_PACKET *ep) { int32_t i, rc, ecs; unsigned char md5tmp[MD5_DIGEST_LENGTH]; struct timeb tps; struct s_client *cl = reader->client; if(!cl) return 0; cs_ftime(&tps); MD5(ep->emm, ep->emm[2], md5tmp); for (i = ecs = 0; i < CS_EMMCACHESIZE; i++) { if (!memcmp(cl->emmcache[i].emmd5, md5tmp, CS_EMMSTORESIZE)) { cl->emmcache[i].count++; if (reader->cachemm) { if (cl->emmcache[i].count > reader->rewritemm) { ecs = 2; //skip emm } else { ecs = 1; //rewrite emm } } break; } } // Ecs=0 not found in cache // Ecs=1 found in cache, rewrite emm // Ecs=2 skip if ((rc = ecs) < 2) { if (is_cascading_reader(reader)) { rdr_debug_mask(reader, D_READER, "network emm reader"); if (reader->ph.c_send_emm) { rc = reader->ph.c_send_emm(ep); } else { rdr_debug_mask(reader, D_READER, "send_emm() support missing"); rc = 0; } } else { rdr_debug_mask(reader, D_READER, "local emm reader"); rc = cardreader_do_emm(reader, ep); } if (!ecs) i = reader_store_emm(ep->type, md5tmp); } reader_log_emm(reader, ep, i, rc, &tps); return rc; }
int32_t add_ms_to_timeb(struct timeb *tb, int32_t ms)
{
	struct timeb tb_now;
	tb->time += ms / 1000;
	tb->millitm += ms % 1000;
	if(tb->millitm >= 1000)
	{
		tb->millitm -= 1000;
		tb->time++;
	}
	cs_ftime(&tb_now);
	return comp_timeb(tb, &tb_now);
}
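/*
 * Minimal usage sketch (not part of the original source): how cs_ftime(),
 * add_ms_to_timeb() and comp_timeb() are typically combined to implement a
 * millisecond deadline. The wrapper name and the cs_sleepus() polling loop
 * are illustrative assumptions only.
 */
static void wait_deadline_example(int32_t timeout_ms)
{
	struct timeb deadline, now;

	cs_ftime(&deadline);                    // now
	add_ms_to_timeb(&deadline, timeout_ms); // now + timeout_ms

	cs_ftime(&now);
	while(comp_timeb(&deadline, &now) > 0)  // positive: deadline still in the future
	{
		cs_sleepus(50 * 1000);              // do some work or sleep a bit, then re-check
		cs_ftime(&now);
	}
}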
static int32_t oscam_ser_poll(int32_t event, struct s_client *client)
{
	int32_t msec;
	struct pollfd pfds;
	struct timeb tpc;

	cs_ftime(&tpc);
	msec = 1000 * (client->serialdata->tpe.time - tpc.time) + client->serialdata->tpe.millitm - tpc.millitm;
	if(msec < 0)
		{ return (0); }

	pfds.fd = cur_client()->pfd;
	pfds.events = event;
	pfds.revents = 0;

	if(poll(&pfds, 1, msec) != 1)
		{ return (0); }
	else
		{ return (((pfds.revents) & event) == event); }
}
void add_hitcache(struct s_client *cl, ECM_REQUEST *er) { if (!cfg.max_hitcache_time) //we don't want check/save hitcache return; if (!cfg.cacheex_wait_timetab.n) return; uint32_t cacheex_wait_time = get_cacheex_wait_time(er,NULL); if (!cacheex_wait_time) return; CACHE_HIT *result; HIT_KEY search; memset(&search, 0, sizeof(HIT_KEY)); search.caid = er->caid; search.prid = er->prid; search.srvid = er->srvid; pthread_rwlock_wrlock(&hitcache_lock); result = find_hash_table(&ht_hitcache, &search, sizeof(HIT_KEY), &compare_hitkey); if(!result){ //not found, add it! if(cs_malloc(&result, sizeof(CACHE_HIT))){ memset(result, 0, sizeof(CACHE_HIT)); result->key.caid = er->caid; result->key.prid = er->prid; result->key.srvid = er->srvid; add_hash_table(&ht_hitcache, &result->ht_node, &ll_hitcache, &result->ll_node, result, &result->key, sizeof(HIT_KEY)); } } if(result){ if(cl) result->grp |= cl->grp; cs_ftime(&result->time); //always update time; } pthread_rwlock_unlock(&hitcache_lock); }
static int32_t get_log_header(int32_t m, char *txt)
{
	struct s_client *cl = cur_client();
	struct tm lt;
	int32_t pos;

	cs_ftime(&log_ts);
	time_t walltime = cs_walltime(&log_ts);
	localtime_r(&walltime, &lt);

	pos = snprintf(txt, LOG_BUF_SIZE, "[LOG000]%4d/%02d/%02d %02d:%02d:%02d ",
				   lt.tm_year + 1900, lt.tm_mon + 1, lt.tm_mday, lt.tm_hour, lt.tm_min, lt.tm_sec);

	switch(m)
	{
		case 1: // Add thread id and reader type
			return pos + snprintf(txt + pos, LOG_BUF_SIZE - pos, "%8X %c ", cl ? cl->tid : 0, cl ? cl->typ : ' ');
		case 0: // Add thread id
			return pos + snprintf(txt + pos, LOG_BUF_SIZE - pos, "%8X%-3.3s ", cl ? cl->tid : 0, "");
		default: // Add empty thread id
			return pos + snprintf(txt + pos, LOG_BUF_SIZE - pos, "%8X%-3.3s ", 0, "");
	}
}
int32_t cardreader_do_emm(struct s_reader *reader, EMM_PACKET *ep)
{
	int32_t rc;

	if(reader->typ == R_SMART)
	{
		// check health does not work with the new card status check, but is not actually needed for EMM
		rc = 1;
	}
	else
	{
		rc = cardreader_do_checkhealth(reader);
	}

	if(rc)
	{
		if((1 << (ep->emm[0] % 0x80)) & reader->b_nano)
		{
			return 3;
		}

		if(reader->csystem.active && reader->csystem.do_emm)
		{
			rc = reader->csystem.do_emm(reader, ep);
		}
		else
		{
			rc = 0;
		}
	}

	if(rc > 0)
	{
		cs_ftime(&reader->emm_last); // last time an emm was written is now!
	}
	return (rc);
}
static void reader_log_emm(struct s_reader * reader, EMM_PACKET *ep, int32_t i, int32_t rc, struct timeb *tps) { char *rtxt[] = { "error", is_cascading_reader(reader) ? "sent" : "written", "skipped", "blocked" }; char *typedesc[] = { "unknown", "unique", "shared", "global" }; struct s_client *cl = reader->client; struct timeb tpe; if (reader->logemm & (1 << rc)) { cs_ftime(&tpe); if (!tps) tps = &tpe; rdr_log(reader, "%s emmtype=%s, len=%d, idx=%d, cnt=%d: %s (%ld ms)", username(ep->client), typedesc[cl->emmcache[i].type], ep->emm[2], i, cl->emmcache[i].count, rtxt[rc], 1000 * (tpe.time - tps->time) + tpe.millitm - tps->millitm); } if (rc) { cl->lastemm = time(NULL); led_status_emm_ok(); } #if defined(WEBIF) || defined(LCDSUPPORT) //counting results switch (rc) { case 0: reader->emmerror[ep->type]++; break; case 1: reader->emmwritten[ep->type]++; break; case 2: reader->emmskipped[ep->type]++; break; case 3: reader->emmblocked[ep->type]++; break; } #endif }
/*
 * read /proc data into the passed struct pstat
 * returns 0 on success, -1 on error
 */
int8_t get_stats_linux(const pid_t pid, struct pstat *result)
{
	// convert pid to string
	char pid_s[20];
	snprintf(pid_s, sizeof(pid_s), "%d", pid);

	char stat_filepath[30] = "/proc/";
	strncat(stat_filepath, pid_s, sizeof(stat_filepath) - strlen(stat_filepath) - 1);
	strncat(stat_filepath, "/stat", sizeof(stat_filepath) - strlen(stat_filepath) - 1);

	FILE *f_pstat = fopen(stat_filepath, "r");
	if(f_pstat == NULL)
	{
		cs_log("ERROR: Can't open %s for reading: %s", stat_filepath, strerror(errno));
		return -1;
	}

	FILE *f_stat = fopen("/proc/stat", "r");
	if(!f_stat)
	{
		cs_log("ERROR: Can't open /proc/stat for reading: %s", strerror(errno));
		fclose(f_pstat); // don't leak the already opened /proc/<pid>/stat handle
		return -1;
	}

	// read values from /proc/pid/stat
	uint64_t rss;
	if(fscanf(f_pstat, "%*d %*s %*c %*d %*d %*d %*d %*d %*u %*u %*u %*u %*u %" SCNu32 "%" SCNu32 "%" SCNd32 "%" SCNd32 "%*d %*d %*d %*d %*u %" SCNu64 "%" SCNu64,
			  &result->utime_ticks, &result->stime_ticks,
			  &result->cutime_ticks, &result->cstime_ticks,
			  &result->vsize, &rss) == EOF)
	{
		fclose(f_pstat);
		fclose(f_stat);
		return -1;
	}
	fclose(f_pstat);
	result->rss = rss * getpagesize();

	// read + calc cpu total time from /proc/stat
	uint32_t cpu_time[10];
	if(fscanf(f_stat, "%*s %" SCNu32 "%" SCNu32 "%" SCNu32 "%" SCNu32 "%" SCNu32 "%" SCNu32 "%" SCNu32 "%" SCNu32 "%" SCNu32 "%" SCNu32,
			  &cpu_time[0], &cpu_time[1], &cpu_time[2], &cpu_time[3], &cpu_time[4],
			  &cpu_time[5], &cpu_time[6], &cpu_time[7], &cpu_time[8], &cpu_time[9]) == EOF)
	{
		fclose(f_stat);
		return -1;
	}
	fclose(f_stat);

	int i;
	result->cpu_total_time = 0;
	for(i = 0; i < 10; i++)
	{
		result->cpu_total_time += cpu_time[i];
	}

	// read cpu/meminfo from sysinfo()
	struct sysinfo info;
	float shiftfloat = (float)(1 << SI_LOAD_SHIFT);
	if(!sysinfo(&info))
	{
		// cpu load
		result->cpu_avg[0] = (float)info.loads[0] / shiftfloat;
		result->cpu_avg[1] = (float)info.loads[1] / shiftfloat;
		result->cpu_avg[2] = (float)info.loads[2] / shiftfloat;
		// meminfo
		result->mem_total = info.totalram * info.mem_unit;
		result->mem_free = info.freeram * info.mem_unit;
		result->mem_used = (info.totalram * info.mem_unit) - (info.freeram * info.mem_unit);
		result->mem_buff = info.bufferram * info.mem_unit;
	}

	// set timestamp for function call
	cs_ftime(&result->time_started);
	return 0;
}
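/*
 * Illustrative sketch (assumption, not in the original source): computing a
 * process CPU percentage from two get_stats_linux() samples. Only fields
 * referenced above (utime_ticks, stime_ticks, cpu_total_time) are used; the
 * helper name and sampling scheme are hypothetical.
 */
static float pstat_cpu_percent_example(const struct pstat *prev, const struct pstat *cur)
{
	// ticks spent by the process between the two samples
	uint64_t proc_ticks = ((uint64_t)cur->utime_ticks + cur->stime_ticks)
						- ((uint64_t)prev->utime_ticks + prev->stime_ticks);
	// ticks spent by all CPUs between the two samples
	uint64_t total_ticks = (uint64_t)cur->cpu_total_time - (uint64_t)prev->cpu_total_time;

	if(total_ticks == 0)
	{
		return 0.0f; // identical samples, avoid division by zero
	}
	return 100.0f * (float)proc_ticks / (float)total_ticks;
}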
static int32_t oscam_ser_recv(struct s_client *client, uchar *xbuf, int32_t l) { int32_t s, p, n, r; uchar job=IS_BAD; static uchar lb; static int32_t have_lb=0; uchar *buf=xbuf+1; struct s_serial_client *serialdata=client->serialdata; if (!client->pfd) return(-1); cs_ftime(&serialdata->tps); serialdata->tpe=serialdata->tps; serialdata->tpe.millitm+=serialdata->oscam_ser_timeout; serialdata->tpe.time+=(serialdata->tpe.millitm/1000); serialdata->tpe.millitm%=1000; buf[0]=lb; for (s=p=r=0, n=have_lb; (s<4) && (p>=0); s++) { switch(s) { case 0: // STAGE 0: skip known garbage from DSR9500 if (oscam_ser_selrec(buf, 2-n, l, &n)) { if ((buf[0]==0x0A) && (buf[1]==0x0D)) p=(-4); if ((buf[0]==0x0D) && (buf[1]==0x0A)) p=(-4); } else p=(-3); have_lb=0; break; case 1: // STAGE 1: identify protocol p=(-3); if (oscam_ser_selrec(buf, 1, l, &n)) // now we have 3 bytes in buf { if((buf[0] == 0x04) && (buf[1] == 0x00) && (buf[2] == 0x02)) { //skip unsupported Advanced Serial Sharing Protocol HF 8900 oscam_ser_selrec(buf, 2, l, &n); // get rest 2 bytes to buffor p=(-4); have_lb=0; break; } else { p=(-2); if (client->typ == 'c') // HERE IS SERVER { job=IS_ECM; // assume ECM switch(buf[0]) { case 0x00: if( (buf[1]==0x01)&&(buf[2]==0x00) ) { p=P_GS; job=IS_LGO; serialdata->tpe.time++; } break; case 0x01: if( (buf[1]&0xf0)==0xb0 ) p=P_GBOX; else {p=P_SSSP; job=IS_PMT;} break; // pmt-request case 0x02: p=P_HSIC; break; case 0x03: switch(serialdata->oscam_ser_proto) { case P_SSSP : case P_GS : case P_DSR95 : p=serialdata->oscam_ser_proto; break; case P_AUTO : p=(buf[1]<0x30) ? P_SSSP : P_DSR95; break; // auto for GS is useless !! } break; case 0x04: p=P_DSR95; job=IS_ECHO; serialdata->dsr9500type=P_DSR_GNUSMAS; break; case 0x7E: p=P_ALPHA; if (buf[1]!=0x80) job=IS_BAD; break; case 0x80: case 0x81: p=P_BOMBA; break; } } else // HERE IS CLIENT { job=IS_DCW; // assume DCW switch(serialdata->oscam_ser_proto) { case P_HSIC : if ((buf[0]==4) && (buf[1]==4)) p=P_HSIC; break; case P_BOMBA: p=P_BOMBA; break; case P_DSR95: if (buf[0]==4) p=P_DSR95; break; case P_ALPHA: if (buf[0]==0x88) p=P_ALPHA; break; } } if ((serialdata->oscam_ser_proto!=p) && (serialdata->oscam_ser_proto!=P_AUTO)) p=(-2); } } break; case 2: // STAGE 2: examine length if (client->typ == 'c') switch(p) { case P_SSSP : r=(buf[1]<<8)|buf[2]; break; case P_BOMBA : r=buf[2]; break; case P_HSIC : if (oscam_ser_selrec(buf, 12, l, &n)) r=buf[14]; else p=(-1); break; case P_DSR95 : if( job==IS_ECHO ) { r=17*serialdata->samsung_dcw-3+serialdata->samsung_0a; serialdata->samsung_dcw=serialdata->samsung_0a=0; } else { if (oscam_ser_selrec(buf, 16, l, &n)) { uchar b; if (cs_atob(&b, (char *)buf+17, 1)<0) p=(-2); else { r=(b<<1); r+=(serialdata->dsr9500type==P_DSR_WITHSID)?4:0; } } else p=(-1); } break; case P_GS : if (job==IS_LGO) r=5; else { if (oscam_ser_selrec(buf, 1, l, &n)) r=(buf[3]<<8)|buf[2]; else p=(-1); } break; case P_ALPHA : r=-0x7F; // char specifying EOT break; case P_GBOX : r=((buf[1]&0xf)<<8) | buf[2]; serialdata->gbox_lens.cat_len = r; break; default : serialdata->dsr9500type=P_DSR_AUTO; } else switch(p) { case P_HSIC : r=(buf[2]==0x3A) ? 20 : 0; break; // 3A=DCW / FC=ECM was wrong case P_BOMBA : r=13; break; case P_DSR95 : r=14; break; case P_ALPHA : r=(buf[1]<<8)|buf[2]; break; // should be 16 always } break; case 3: // STAGE 3: get the rest ... 
if (r>0) // read r additional bytes { int32_t all = n+r; if( !oscam_ser_selrec(buf, r, l, &n) ) { cs_debug_mask(D_CLIENT, "not all data received, waiting another 50 ms"); serialdata->tpe.millitm+=50; if( !oscam_ser_selrec(buf, all-n, l, &n) ) p=(-1); } // auto detect DSR9500 protocol if( client->typ == 'c' && p==P_DSR95 && serialdata->dsr9500type==P_DSR_AUTO ) { serialdata->tpe.millitm+=20; if( oscam_ser_selrec(buf, 2, l, &n) ) { if( cs_atoi((char *)buf+n-2, 1, 1)==0xFFFFFFFF ) { switch( (buf[n-2]<<8)|buf[n-1] ) { case 0x0A0D : serialdata->dsr9500type=P_DSR_OPEN; break; case 0x0D0A : serialdata->dsr9500type=P_DSR_PIONEER; break; default : serialdata->dsr9500type=P_DSR_UNKNOWN; break; } }else{ if( oscam_ser_selrec(buf, 2, l, &n) ) if( cs_atoi((char *)buf+n-2, 1, 1)==0xFFFFFFFF ) serialdata->dsr9500type=P_DSR_UNKNOWN; else serialdata->dsr9500type=P_DSR_WITHSID; else { serialdata->dsr9500type=P_DSR_UNKNOWN; p=(-1); } } } else serialdata->dsr9500type=P_DSR_GNUSMAS; if( p ) cs_log("detected dsr9500-%s type receiver", dsrproto_txt[serialdata->dsr9500type]); } // gbox if( client->typ == 'c' && p==P_GBOX ) { int32_t j; for( j=0; (j<3) && (p>0); j++) switch( j ) { case 0: // PMT head if( !oscam_ser_selrec(buf, 3, l, &n) ) p=(-1); else if( !(buf[n-3]==0x02 && (buf[n-2]&0xf0)==0xb0) ) p=(-2); break; case 1: // PMT + ECM header serialdata->gbox_lens.pmt_len=((buf[n-2]&0xf)<<8)|buf[n-1]; if( !oscam_ser_selrec(buf, serialdata->gbox_lens.pmt_len+3, l, &n) ) p=(-1); break; case 2: // ECM + ECM PID serialdata->gbox_lens.ecm_len=((buf[n-2]&0xf)<<8)|buf[n-1]; if( !oscam_ser_selrec(buf, serialdata->gbox_lens.ecm_len+4, l, &n) ) p=(-1); } } // gbox } else if (r<0) // read until specified char (-r) { while((buf[n-1]!=(-r)) && (p>0)) if (!oscam_ser_selrec(buf, 1, l, &n)) p=(-1); } break; } } if (p==(-2) || p==(-1)) { oscam_ser_selrec(buf, l-n, l, &n); // flush buffer serialdata->serial_errors++; } cs_ftime(&serialdata->tpe); cs_ddump_mask(D_CLIENT, buf, n, "received %d bytes from %s in %ld msec", n, remote_txt(), 1000*(serialdata->tpe.time-serialdata->tps.time)+serialdata->tpe.millitm-serialdata->tps.millitm); client->last=serialdata->tpe.time; switch(p) { case (-1): if (client->typ == 'c'&&(n>2)&&(buf[0]==2)&&(buf[1]==2)&&(buf[2]==2)) { oscam_ser_disconnect(); cs_log("humax powered on"); // this is nice ;) } else { if(client->typ == 'c' && buf[0] == 0x1 && buf[1] == 0x08 && buf[2] == 0x20 && buf[3] == 0x08) { oscam_ser_disconnect(); cs_log("ferguson powered on"); // this is nice to ;) } else cs_log(incomplete, n); } break; case (-2): cs_debug_mask(D_CLIENT, "unknown request or garbage"); break; } xbuf[0]=(uchar) ((job<<4) | p); return((p<0)?0:n+1); }
void cleanup_cache(void)
{
	ECMHASH *ecmhash;
	CW *cw;
	struct s_pushclient *pc, *nxt;
	node *i, *i_next, *j, *j_next;
	struct timeb now;
	int32_t gone_first, gone_upd;

	pthread_rwlock_wrlock(&cache_lock);

	i = get_first_node_list(&ll_cache);
	while(i)
	{
		i_next = i->next;
		ecmhash = get_data_from_node(i);
		if(!ecmhash) // guard against empty nodes before dereferencing
		{
			i = i_next;
			continue;
		}

		cs_ftime(&now);
		gone_first = comp_timeb(&now, &ecmhash->first_recv_time);
		gone_upd = comp_timeb(&now, &ecmhash->upd_time);

		if(gone_first <= (cfg.max_cache_time * 1000))
		{
			break; // entries are ordered by first_recv_time, no need to check the newer ones
		}

		if(gone_upd > (cfg.max_cache_time * 1000))
		{
			j = get_first_node_list(&ecmhash->ll_cw);
			while(j)
			{
				j_next = j->next;
				cw = get_data_from_node(j);
				if(cw)
				{
					pthread_rwlock_destroy(&cw->pushout_client_lock);
					pc = cw->pushout_client;
					cw->pushout_client = NULL;
					while(pc)
					{
						nxt = pc->next_push;
						NULLFREE(pc);
						pc = nxt;
					}
					remove_elem_list(&ecmhash->ll_cw, &cw->ll_node);
					remove_elem_hash_table(&ecmhash->ht_cw, &cw->ht_node);
					NULLFREE(cw);
				}
				j = j_next;
			}
			deinitialize_hash_table(&ecmhash->ht_cw);
			remove_elem_list(&ll_cache, &ecmhash->ll_node);
			remove_elem_hash_table(&ht_cache, &ecmhash->ht_node);
			NULLFREE(ecmhash);
		}
		i = i_next;
	}
	pthread_rwlock_unlock(&cache_lock);
}
static void process_clients(void) { int32_t i, k, j, rc, pfdcount = 0; struct s_client *cl; struct s_reader *rdr; struct pollfd *pfd; struct s_client **cl_list; struct timeb start, end; // start time poll, end time poll uint32_t cl_size = 0; uchar buf[10]; if (pipe(thread_pipe) == -1) { printf("cannot create pipe, errno=%d\n", errno); exit(1); } cl_size = chk_resize_cllist(&pfd, &cl_list, 0, 100); pfd[pfdcount].fd = thread_pipe[0]; pfd[pfdcount].events = POLLIN | POLLPRI; cl_list[pfdcount] = NULL; while (!exit_oscam) { pfdcount = 1; //connected tcp clients for (cl=first_client->next; cl; cl=cl->next) { if (cl->init_done && !cl->kill && cl->pfd && cl->typ=='c' && !cl->is_udp) { if (cl->pfd && !cl->thread_active) { cl_size = chk_resize_cllist(&pfd, &cl_list, cl_size, pfdcount); cl_list[pfdcount] = cl; pfd[pfdcount].fd = cl->pfd; pfd[pfdcount++].events = POLLIN | POLLPRI; } } //reader: //TCP: // - TCP socket must be connected // - no active init thread //UDP: // - connection status ignored // - no active init thread rdr = cl->reader; if (rdr && cl->typ=='p' && cl->init_done) { if (cl->pfd && !cl->thread_active && ((rdr->tcp_connected && rdr->ph.type==MOD_CONN_TCP)||(rdr->ph.type==MOD_CONN_UDP))) { cl_size = chk_resize_cllist(&pfd, &cl_list, cl_size, pfdcount); cl_list[pfdcount] = cl; pfd[pfdcount].fd = cl->pfd; pfd[pfdcount++].events = (POLLIN | POLLPRI); } } } //server (new tcp connections or udp messages) for (k = 0; k < CS_MAX_MOD; k++) { struct s_module *module = &modules[k]; if ((module->type & MOD_CONN_NET)) { for (j = 0; j < module->ptab.nports; j++) { if (module->ptab.ports[j].fd) { cl_size = chk_resize_cllist(&pfd, &cl_list, cl_size, pfdcount); cl_list[pfdcount] = NULL; pfd[pfdcount].fd = module->ptab.ports[j].fd; pfd[pfdcount++].events = (POLLIN | POLLPRI); } } } } if (pfdcount >= 1024) cs_log("WARNING: too many users!"); cs_ftime(&start); // register start time rc = poll(pfd, pfdcount, 5000); if (rc<1) continue; cs_ftime(&end); // register end time for (i=0; i<pfdcount&&rc>0; i++) { if (pfd[i].revents == 0) continue; // skip sockets with no changes rc--; //event handled! 
cs_debug_mask(D_TRACE, "[OSCAM] new event %d occurred on fd %d after %ld ms inactivity", pfd[i].revents, pfd[i].fd,1000*(end.time-start.time)+end.millitm-start.millitm); //clients cl = cl_list[i]; if (cl && !is_valid_client(cl)) continue; if (pfd[i].fd == thread_pipe[0] && (pfd[i].revents & (POLLIN | POLLPRI))) { // a thread ended and cl->pfd should be added to pollfd list again (thread_active==0) int32_t len= read(thread_pipe[0], buf, sizeof(buf)); if(len == -1){ cs_debug_mask(D_TRACE, "[OSCAM] Reading from pipe failed (errno=%d %s)", errno, strerror(errno)); } cs_ddump_mask(D_TRACE, buf, len, "[OSCAM] Readed:"); continue; } //clients // message on an open tcp connection if (cl && cl->init_done && cl->pfd && (cl->typ == 'c' || cl->typ == 'm')) { if (pfd[i].fd == cl->pfd && (pfd[i].revents & (POLLHUP | POLLNVAL | POLLERR))) { //client disconnects kill_thread(cl); continue; } if (pfd[i].fd == cl->pfd && (pfd[i].revents & (POLLIN | POLLPRI))) { add_job(cl, ACTION_CLIENT_TCP, NULL, 0); } } //reader // either an ecm answer, a keepalive or connection closed from a proxy // physical reader ('r') should never send data without request rdr = NULL; struct s_client *cl2 = NULL; if (cl && cl->typ == 'p'){ rdr = cl->reader; if(rdr) cl2 = rdr->client; } if (rdr && cl2 && cl2->init_done) { if (cl2->pfd && pfd[i].fd == cl2->pfd && (pfd[i].revents & (POLLHUP | POLLNVAL | POLLERR))) { //connection to remote proxy was closed //oscam should check for rdr->tcp_connected and reconnect on next ecm request sent to the proxy network_tcp_connection_close(rdr, "closed"); rdr_debug_mask(rdr, D_READER, "connection closed"); } if (cl2->pfd && pfd[i].fd == cl2->pfd && (pfd[i].revents & (POLLIN | POLLPRI))) { add_job(cl2, ACTION_READER_REMOTE, NULL, 0); } } //server sockets // new connection on a tcp listen socket or new message on udp listen socket if (!cl && (pfd[i].revents & (POLLIN | POLLPRI))) { for (k = 0; k < CS_MAX_MOD; k++) { struct s_module *module = &modules[k]; if ((module->type & MOD_CONN_NET)) { for (j = 0; j < module->ptab.nports; j++) { if (module->ptab.ports[j].fd && module->ptab.ports[j].fd == pfd[i].fd) { accept_connection(module, k, j); } } } } } } cs_ftime(&start); // register start time for new poll next run first_client->last=time((time_t *)0); } free(pfd); free(cl_list); return; }
static int32_t ecm_ratelimit_findspace(struct s_reader *reader, ECM_REQUEST *er, struct ecmrl rl, int32_t reader_mode) { int32_t h, foundspace = -1; int32_t maxecms = MAXECMRATELIMIT; // init maxecms int32_t totalecms = 0; // init totalecms struct timeb actualtime; cs_ftime(&actualtime); for(h = 0; h < MAXECMRATELIMIT; h++) // release slots with srvid that are overtime, even if not called from reader module to maximize available slots! { if(reader->rlecmh[h].last.time == -1) { continue; } int32_t gone = comp_timeb(&actualtime, &reader->rlecmh[h].last); if( gone >= (reader->rlecmh[h].ratelimittime + reader->rlecmh[h].srvidholdtime) || gone < 0) // gone <0 fixup for bad systemtime on dvb receivers while changing transponders { cs_debug_mask(D_CLIENT, "ratelimiter srvid %04X released from slot #%d/%d of reader %s (%d>=%d ratelimit ms + %d ms srvidhold!)", reader->rlecmh[h].srvid, h + 1, MAXECMRATELIMIT, reader->label, gone, reader->rlecmh[h].ratelimittime, reader->rlecmh[h].srvidholdtime); reader->rlecmh[h].last.time = -1; reader->rlecmh[h].srvid = -1; reader->rlecmh[h].kindecm = 0; } if(reader->rlecmh[h].last.time == -1) { continue; } if(reader->rlecmh[h].ratelimitecm < maxecms) { maxecms = reader->rlecmh[h].ratelimitecm; } // we found a more critical ratelimit srvid totalecms++; } cs_debug_mask(D_CLIENT, "ratelimiter found total of %d srvid for reader %s most critical is limited to %d requests", totalecms, reader->label, maxecms); if(reader->cooldown[0] && reader->cooldownstate != 1) { maxecms = MAXECMRATELIMIT; } // dont apply ratelimits if cooldown isnt in use or not in effect for(h = 0; h < MAXECMRATELIMIT; h++) // check if srvid is already in a slot { if(reader->rlecmh[h].last.time == -1) { continue; } if(reader->rlecmh[h].srvid == er->srvid && reader->rlecmh[h].caid == rl.caid && reader->rlecmh[h].provid == rl.provid && (!reader->rlecmh[h].chid || (reader->rlecmh[h].chid == rl.chid))) { int32_t gone = comp_timeb(&actualtime, &reader->rlecmh[h].last); cs_debug_mask(D_CLIENT, "ratelimiter found srvid %04X for %d ms in slot #%d/%d of reader %s", er->srvid, gone, h + 1, MAXECMRATELIMIT, reader->label); // check ecmunique if enabled and ecmunique time is done if(reader_mode && reader->ecmunique) { gone = comp_timeb(&actualtime, &reader->rlecmh[h].last); if(gone < reader->ratelimittime) { if(memcmp(reader->rlecmh[h].ecmd5, er->ecmd5, CS_ECMSTORESIZE)) { if(er->ecm[0] == reader->rlecmh[h].kindecm) { char ecmd5[17 * 3]; cs_hexdump(0, reader->rlecmh[h].ecmd5, 16, ecmd5, sizeof(ecmd5)); cs_debug_mask(D_CLIENT, "ratelimiter ecm %s in this slot for next %d ms!", ecmd5, (int)(reader->rlecmh[h].ratelimittime - gone)); struct ecm_request_t *erold = NULL; if(!cs_malloc(&erold, sizeof(struct ecm_request_t))) { return -2; } memcpy(erold, er, sizeof(struct ecm_request_t)); // copy ecm all memcpy(erold->ecmd5, reader->rlecmh[h].ecmd5, CS_ECMSTORESIZE); // replace md5 hash struct ecm_request_t *ecm = NULL; ecm = check_cache(erold, erold->client); //CHECK IF FOUND ECM IN CACHE NULLFREE(erold); if(ecm) //found in cache { write_ecm_answer(reader, er, ecm->rc, ecm->rcEx, ecm->cw, NULL); } else { write_ecm_answer(reader, er, E_NOTFOUND, E2_RATELIMIT, NULL, "Ratelimiter: no slots free!"); } NULLFREE(ecm); return -2; } continue; } } if((er->ecm[0] == reader->rlecmh[h].kindecm) && (gone <= (reader->ratelimittime + reader->srvidholdtime))) { cs_debug_mask(D_CLIENT, "ratelimiter srvid %04X ecm type %s, only allowing %s for next %d ms in slot #%d/%d of reader %s -> skipping this slot!", reader->rlecmh[h].srvid, 
(reader->rlecmh[h].kindecm == 0x80 ? "even" : "odd"), (reader->rlecmh[h].kindecm == 0x80 ? "odd" : "even"), (int)(reader->rlecmh[h].ratelimittime + reader->rlecmh[h].srvidholdtime - gone), h + 1, maxecms, reader->label); continue; } } if(h > 0) { for(foundspace = 0; foundspace < h; foundspace++) // check for free lower slot { if(reader->rlecmh[foundspace].last.time == -1) { reader->rlecmh[foundspace] = reader->rlecmh[h]; // replace ecm request info reader->rlecmh[h].srvid = -1; reader->rlecmh[h].last.time = -1; if(foundspace < maxecms) { cs_debug_mask(D_CLIENT, "ratelimiter moved srvid %04X to slot #%d/%d of reader %s", er->srvid, foundspace + 1, maxecms, reader->label); return foundspace; // moving to lower free slot! } else { cs_debug_mask(D_CLIENT, "ratelimiter removed srvid %04X from slot #%d/%d of reader %s", er->srvid, foundspace + 1, maxecms, reader->label); reader->rlecmh[foundspace].last.time = -1; // free this slot since we are over ratelimit! return -1; // sorry, ratelimit! } } } } if(h < maxecms) // found but cant move to lower position! { return h; // return position if within ratelimits! } else { reader->rlecmh[h].last.time = -1; // free this slot since we are over ratelimit! cs_debug_mask(D_CLIENT, "ratelimiter removed srvid %04X from slot #%d/%d of reader %s", er->srvid, h + 1, maxecms, reader->label); return -1; // sorry, ratelimit! } } } // srvid not found in slots! if((reader->cooldown[0] && reader->cooldownstate == 1) || !reader->cooldown[0]) { ; // do we use cooldown at all, are we in cooldown fase? // we are in cooldown or no cooldown configured! if(totalecms + 1 > maxecms || totalecms + 1 > rl.ratelimitecm) // check if this channel fits in! { cs_debug_mask(D_CLIENT, "ratelimiter for reader %s has no free slots!", reader->label); return -1; } } else { maxecms = MAXECMRATELIMIT; // no limits right now! } for(h = 0; h < maxecms; h++) // check for free slot { if(reader->rlecmh[h].last.time == -1) { if(reader_mode) { cs_debug_mask(D_CLIENT, "ratelimiter added srvid %04X to slot #%d/%d of reader %s", er->srvid, h + 1, maxecms, reader->label); } return h; // free slot found -> assign it! } else { int32_t gone = comp_timeb(&actualtime, &reader->rlecmh[h].last); cs_debug_mask(D_CLIENT, "ratelimiter srvid %04X for %d ms present in slot #%d/%d of reader %s", reader->rlecmh[h].srvid, gone , h + 1, maxecms, reader->label); } //occupied slots } #ifdef HAVE_DVBAPI /* Overide ratelimit priority for dvbapi request */ foundspace = -1; int32_t gone = 0; if((cfg.dvbapi_enabled == 1) && streq(er->client->account->usr, cfg.dvbapi_usr)) { if(reader->lastdvbapirateoverride.time == 0) { // fixup for first run! gone = comp_timeb(&actualtime, &reader->lastdvbapirateoverride); } if(gone > reader->ratelimittime) { struct timeb minecmtime = actualtime; for(h = 0; h < MAXECMRATELIMIT; h++) { gone = comp_timeb(&minecmtime, &reader->rlecmh[h].last); if(gone > 0) { minecmtime = reader->rlecmh[h].last; foundspace = h; } } reader->lastdvbapirateoverride = actualtime; cs_debug_mask(D_CLIENT, "prioritizing DVBAPI user %s over other watching client", er->client->account->usr); cs_debug_mask(D_CLIENT, "ratelimiter forcing srvid %04X into slot #%d/%d of reader %s", er->srvid, foundspace + 1, maxecms, reader->label); return foundspace; } else cs_debug_mask(D_CLIENT, "DVBAPI User %s is switching too fast for ratelimit and can't be prioritized!", er->client->account->usr); } #endif return (-1); // no slot found }
int32_t ecm_ratelimit_check(struct s_reader *reader, ECM_REQUEST *er, int32_t reader_mode) // If reader_mode is 1, ECM_REQUEST need to be assigned to reader and slot. // Else just report if a free slot is available. { // No rate limit set if(!reader->ratelimitecm) { return OK; } int32_t foundspace = -1, h, maxslots = MAXECMRATELIMIT; //init slots to oscam global maximums struct ecmrl rl; struct timeb now; rl = get_ratelimit(er); if(rl.ratelimitecm > 0) { cs_debug_mask(D_CLIENT, "ratelimit found for CAID: %04X PROVID: %06X SRVID: %04X CHID: %04X maxecms: %d cycle: %d ms srvidhold: %d ms", rl.caid, rl.provid, rl.srvid, rl.chid, rl.ratelimitecm, rl.ratelimittime, rl.srvidholdtime); } else // nothing found: apply general reader limits { rl.ratelimitecm = reader->ratelimitecm; rl.ratelimittime = reader->ratelimittime; rl.srvidholdtime = reader->srvidholdtime; rl.caid = er->caid; rl.provid = er->prid; rl.chid = er->chid; rl.srvid = er->srvid; cs_debug_mask(D_CLIENT, "ratelimiter apply readerdefault for CAID: %04X PROVID: %06X SRVID: %04X CHID: %04X maxecms: %d cycle: %d ms srvidhold: %d ms", rl.caid, rl.provid, rl.srvid, rl.chid, rl.ratelimitecm, rl.ratelimittime, rl.srvidholdtime); } // Below this line: rate limit functionality. // No cooldown set if(!reader->cooldown[0]) { cs_debug_mask(D_CLIENT, "ratelimiter find a slot for srvid %04X on reader %s", er->srvid, reader->label); foundspace = ecm_ratelimit_findspace(reader, er, rl, reader_mode); if(foundspace < 0) { if(reader_mode) { if(foundspace != -2) { cs_debug_mask(D_CLIENT, "ratelimiter no free slot for srvid %04X on reader %s -> dropping!", er->srvid, reader->label); write_ecm_answer(reader, er, E_NOTFOUND, E2_RATELIMIT, NULL, "Ratelimiter: no slots free!"); } } return ERROR; // not even trowing an error... obvious reason ;) } else //we are within ecmratelimits { if(reader_mode) { // Register new slot //reader->rlecmh[foundspace].srvid=er->srvid; // register srvid reader->rlecmh[foundspace] = rl; // register this srvid ratelimit params cs_ftime(&reader->rlecmh[foundspace].last); // register request time memcpy(reader->rlecmh[foundspace].ecmd5, er->ecmd5, CS_ECMSTORESIZE);// register ecmhash reader->rlecmh[foundspace].kindecm = er->ecm[0]; // register kind of ecm } return OK; } } // Below this line: rate limit functionality with cooldown option. // Cooldown state cycle: // state = 0: Cooldown setup phase. No rate limit set. // If number of ecm request exceed reader->ratelimitecm, cooldownstate goes to 2. // state = 2: Cooldown delay phase. No rate limit set. // If number of ecm request still exceed reader->ratelimitecm at end of cooldown delay phase, // cooldownstate goes to 1 (rate limit phase). // Else return back to setup phase (state 0). // state = 1: Cooldown ratelimit phase. Rate limit set. // If cooldowntime reader->cooldown[1] is elapsed, return to cooldown setup phase (state 0). 
cs_ftime(&now); int32_t gone = comp_timeb(&now, &reader->cooldowntime); if(reader->cooldownstate == 1) // Cooldown in ratelimit phase { if(gone <= reader->cooldown[1]*1000) // check if cooldowntime is elapsed { maxslots = reader->ratelimitecm; } // use user defined ratelimitecm else // Cooldown time is elapsed { reader->cooldownstate = 0; // set cooldown setup phase reader->cooldowntime.time = -1; // reset cooldowntime maxslots = MAXECMRATELIMIT; //use oscam defined max slots cs_log("Reader: %s ratelimiter returning to setup phase cooling down period of %d seconds is done!", reader->label, reader->cooldown[1]); } } // if cooldownstate == 1 if(reader->cooldownstate == 2 && gone > reader->cooldown[0]*1000) { // Need to check if the otherslots are not exceeding the ratelimit at the moment that // cooldown[0] time was exceeded! // time_t actualtime = reader->cooldowntime + reader->cooldown[0]; maxslots = 0; // maxslots is used as counter for(h = 0; h < MAXECMRATELIMIT; h++) { if(reader->rlecmh[h].last.time == -1) { continue; } // skip empty slots // how many active slots are registered at end of cooldown delay period gone = comp_timeb(&now, &reader->rlecmh[h].last); if(gone <= reader->ratelimittime) { maxslots++; if(maxslots >= reader->ratelimitecm) { break; } // Need to go cooling down phase } } if(maxslots < reader->ratelimitecm) { reader->cooldownstate = 0; // set cooldown setup phase reader->cooldowntime.time = -1; // reset cooldowntime maxslots = MAXECMRATELIMIT; // maxslots is maxslots again cs_log("Reader: %s ratelimiter returning to setup phase after %d seconds cooldowndelay!", reader->label, reader->cooldown[0]); } else { reader->cooldownstate = 1; // Entering ratelimit for cooldown ratelimitseconds cs_ftime(&reader->cooldowntime); // set time to enforce ecmratelimit for defined cooldowntime maxslots = reader->ratelimitecm; // maxslots is maxslots again sort_ecmrl(reader); // keep youngest ecm requests in list + housekeeping cs_log("Reader: %s ratelimiter starting cooling down period of %d seconds!", reader->label, reader->cooldown[1]); } } // if cooldownstate == 2 cs_debug_mask(D_CLIENT, "ratelimiter cooldownphase %d find a slot for srvid %04X on reader %s", reader->cooldownstate, er->srvid, reader->label); foundspace = ecm_ratelimit_findspace(reader, er, rl, reader_mode); if(foundspace < 0) { if(reader_mode) { if(foundspace != -2) { cs_debug_mask(D_CLIENT, "ratelimiter cooldownphase %d no free slot for srvid %04X on reader %s -> dropping!", reader->cooldownstate, er->srvid, reader->label); write_ecm_answer(reader, er, E_NOTFOUND, E2_RATELIMIT, NULL, "Ratelimiter: cooldown no slots free!"); } } return ERROR; // not even trowing an error... 
obvious reason ;) } else //we are within ecmratelimits { if(reader_mode) { // Register new slot //reader->rlecmh[foundspace].srvid=er->srvid; // register srvid reader->rlecmh[foundspace] = rl; // register this srvid ratelimit params cs_ftime(&reader->rlecmh[foundspace].last); // register request time memcpy(reader->rlecmh[foundspace].ecmd5, er->ecmd5, CS_ECMSTORESIZE);// register ecmhash reader->rlecmh[foundspace].kindecm = er->ecm[0]; // register kind of ecm } } if(reader->cooldownstate == 0 && foundspace >= reader->ratelimitecm) { if(!reader_mode) // No actual ecm request, just check { return OK; } cs_log("Reader: %s ratelimiter cooldown detected overrun ecmratelimit of %d during setup phase!", reader->label, (foundspace - reader->ratelimitecm + 1)); reader->cooldownstate = 2; // Entering cooldowndelay phase cs_ftime(&reader->cooldowntime); // Set cooldowntime to calculate delay cs_debug_mask(D_CLIENT, "ratelimiter cooldowndelaying %d seconds", reader->cooldown[0]); } // Cooldown state housekeeping is done. There is a slot available. if(reader_mode) { // Register new slot //reader->rlecmh[foundspace].srvid=er->srvid; // register srvid reader->rlecmh[foundspace] = rl; // register this srvid ratelimit params cs_ftime(&reader->rlecmh[foundspace].last); // register request time memcpy(reader->rlecmh[foundspace].ecmd5, er->ecmd5, CS_ECMSTORESIZE);// register ecmhash reader->rlecmh[foundspace].kindecm = er->ecm[0]; // register kind of ecm } return OK; }
void *work_thread(void *ptr) { struct job_data *data = (struct job_data *)ptr; struct s_client *cl = data->cl; struct s_reader *reader = cl->reader; struct timeb start, end; // start time poll, end time poll struct job_data tmp_data; struct pollfd pfd[1]; pthread_setspecific(getclient, cl); cl->thread = pthread_self(); cl->thread_active = 1; set_work_thread_name(data); struct s_module *module = get_module(cl); uint16_t bufsize = module->bufsize; //CCCam needs more than 1024bytes! if(!bufsize) { bufsize = 1024; } uint8_t *mbuf; if(!cs_malloc(&mbuf, bufsize)) { return NULL; } cl->work_mbuf = mbuf; // Track locally allocated data, because some callback may call cs_exit/cs_disconect_client/pthread_exit and then mbuf would be leaked int32_t n = 0, rc = 0, i, idx, s; uint8_t dcw[16]; int8_t restart_reader = 0; while(cl->thread_active) { cs_ftime(&start); // register start time while(cl->thread_active) { if(!cl || cl->kill || !is_valid_client(cl)) { pthread_mutex_lock(&cl->thread_lock); cl->thread_active = 0; pthread_mutex_unlock(&cl->thread_lock); cs_debug_mask(D_TRACE, "ending thread (kill)"); __free_job_data(cl, data); cl->work_mbuf = NULL; // Prevent free_client from freeing mbuf (->work_mbuf) free_client(cl); if(restart_reader) { restart_cardreader(reader, 0); } NULLFREE(mbuf); pthread_exit(NULL); return NULL; } if(data && data->action != ACTION_READER_CHECK_HEALTH) { cs_debug_mask(D_TRACE, "data from add_job action=%d client %c %s", data->action, cl->typ, username(cl)); } if(!data) { if(!cl->kill && cl->typ != 'r') { client_check_status(cl); } // do not call for physical readers as this might cause an endless job loop pthread_mutex_lock(&cl->thread_lock); if(cl->joblist && ll_count(cl->joblist) > 0) { LL_ITER itr = ll_iter_create(cl->joblist); data = ll_iter_next_remove(&itr); if(data) { set_work_thread_name(data); } //cs_debug_mask(D_TRACE, "start next job from list action=%d", data->action); } pthread_mutex_unlock(&cl->thread_lock); } if(!data) { /* for serial client cl->pfd is file descriptor for serial port not socket for example: pfd=open("/dev/ttyUSB0"); */ if(!cl->pfd || module->listenertype == LIS_SERIAL) { break; } pfd[0].fd = cl->pfd; pfd[0].events = POLLIN | POLLPRI; pthread_mutex_lock(&cl->thread_lock); cl->thread_active = 2; pthread_mutex_unlock(&cl->thread_lock); rc = poll(pfd, 1, 3000); pthread_mutex_lock(&cl->thread_lock); cl->thread_active = 1; pthread_mutex_unlock(&cl->thread_lock); if(rc > 0) { cs_ftime(&end); // register end time cs_debug_mask(D_TRACE, "[OSCAM-WORK] new event %d occurred on fd %d after %"PRId64" ms inactivity", pfd[0].revents, pfd[0].fd, comp_timeb(&end, &start)); data = &tmp_data; data->ptr = NULL; cs_ftime(&start); // register start time for new poll next run if(reader) { data->action = ACTION_READER_REMOTE; } else { if(cl->is_udp) { data->action = ACTION_CLIENT_UDP; data->ptr = mbuf; data->len = bufsize; } else { data->action = ACTION_CLIENT_TCP; } if(pfd[0].revents & (POLLHUP | POLLNVAL | POLLERR)) { cl->kill = 1; } } } } if(!data) { continue; } if(!reader && data->action < ACTION_CLIENT_FIRST) { __free_job_data(cl, data); break; } if(!data->action) { break; } struct timeb actualtime; cs_ftime(&actualtime); int32_t gone = comp_timeb(&actualtime, &data->time); if(data != &tmp_data && gone > (int) cfg.ctimeout+1000) { cs_debug_mask(D_TRACE, "dropping client data for %s time %dms", username(cl), gone); __free_job_data(cl, data); continue; } if(data != &tmp_data) { cl->work_job_data = data; } // Track the current job_data switch(data->action) { case 
ACTION_READER_IDLE: reader_do_idle(reader); break; case ACTION_READER_REMOTE: s = check_fd_for_data(cl->pfd); if(s == 0) // no data, another thread already read from fd? { break; } if(s < 0) { if(reader->ph.type == MOD_CONN_TCP) { network_tcp_connection_close(reader, "disconnect"); } break; } rc = reader->ph.recv(cl, mbuf, bufsize); if(rc < 0) { if(reader->ph.type == MOD_CONN_TCP) { network_tcp_connection_close(reader, "disconnect on receive"); } break; } cl->last = time(NULL); // *********************************** TO BE REPLACE BY CS_FTIME() LATER **************** idx = reader->ph.c_recv_chk(cl, dcw, &rc, mbuf, rc); if(idx < 0) { break; } // no dcw received if(!idx) { idx = cl->last_idx; } reader->last_g = time(NULL); // *********************************** TO BE REPLACE BY CS_FTIME() LATER **************** // for reconnect timeout for(i = 0, n = 0; i < cfg.max_pending && n == 0; i++) { if(cl->ecmtask[i].idx == idx) { cl->pending--; casc_check_dcw(reader, i, rc, dcw); n++; } } break; case ACTION_READER_RESET: cardreader_do_reset(reader); break; case ACTION_READER_ECM_REQUEST: reader_get_ecm(reader, data->ptr); break; case ACTION_READER_EMM: reader_do_emm(reader, data->ptr); break; case ACTION_READER_CARDINFO: reader_do_card_info(reader); break; case ACTION_READER_INIT: if(!cl->init_done) { reader_init(reader); } break; case ACTION_READER_RESTART: cl->kill = 1; restart_reader = 1; break; case ACTION_READER_RESET_FAST: reader->card_status = CARD_NEED_INIT; cardreader_do_reset(reader); break; case ACTION_READER_CHECK_HEALTH: cardreader_do_checkhealth(reader); break; case ACTION_READER_CAPMT_NOTIFY: if(reader->ph.c_capmt) { reader->ph.c_capmt(cl, data->ptr); } break; case ACTION_CLIENT_UDP: n = module->recv(cl, data->ptr, data->len); if(n < 0) { break; } module->s_handler(cl, data->ptr, n); break; case ACTION_CLIENT_TCP: s = check_fd_for_data(cl->pfd); if(s == 0) // no data, another thread already read from fd? 
{ break; } if(s < 0) // system error or fd wants to be closed { cl->kill = 1; // kill client on next run continue; } n = module->recv(cl, mbuf, bufsize); if(n < 0) { cl->kill = 1; // kill client on next run continue; } module->s_handler(cl, mbuf, n); break; case ACTION_CACHEEX_TIMEOUT: #ifdef CS_CACHEEX cacheex_timeout(data->ptr); #endif break; case ACTION_FALLBACK_TIMEOUT: fallback_timeout(data->ptr); break; case ACTION_CLIENT_TIMEOUT: ecm_timeout(data->ptr); break; case ACTION_ECM_ANSWER_READER: chk_dcw(data->ptr); break; case ACTION_ECM_ANSWER_CACHE: write_ecm_answer_fromcache(data->ptr); break; case ACTION_CLIENT_INIT: if(module->s_init) { module->s_init(cl); } cl->is_udp = module->type == MOD_CONN_UDP; cl->init_done = 1; break; case ACTION_CLIENT_IDLE: if(module->s_idle) { module->s_idle(cl); } else { cs_log("user %s reached %d sec idle limit.", username(cl), cfg.cmaxidle); cl->kill = 1; } break; case ACTION_CACHE_PUSH_OUT: { #ifdef CS_CACHEEX ECM_REQUEST *er = data->ptr; int32_t res = 0, stats = -1; // cc-nodeid-list-check if(reader) { if(reader->ph.c_cache_push_chk && !reader->ph.c_cache_push_chk(cl, er)) { break; } res = reader->ph.c_cache_push(cl, er); stats = cacheex_add_stats(cl, er->caid, er->srvid, er->prid, 0); } else { if(module->c_cache_push_chk && !module->c_cache_push_chk(cl, er)) { break; } res = module->c_cache_push(cl, er); } debug_ecm(D_CACHEEX, "pushed ECM %s to %s res %d stats %d", buf, username(cl), res, stats); cl->cwcacheexpush++; if(cl->account) { cl->account->cwcacheexpush++; } first_client->cwcacheexpush++; #endif break; } case ACTION_CLIENT_KILL: cl->kill = 1; break; case ACTION_CLIENT_SEND_MSG: { #ifdef MODULE_CCCAM struct s_clientmsg *clientmsg = (struct s_clientmsg *)data->ptr; cc_cmd_send(cl, clientmsg->msg, clientmsg->len, clientmsg->cmd); #endif break; } } // switch __free_job_data(cl, data); } if(thread_pipe[1] && (mbuf[0] != 0x00)) { cs_ddump_mask(D_TRACE, mbuf, 1, "[OSCAM-WORK] Write to pipe:"); if(write(thread_pipe[1], mbuf, 1) == -1) // wakeup client check { cs_debug_mask(D_TRACE, "[OSCAM-WORK] Writing to pipe failed (errno=%d %s)", errno, strerror(errno)); } } // Check for some race condition where while we ended, another thread added a job pthread_mutex_lock(&cl->thread_lock); if(cl->joblist && ll_count(cl->joblist) > 0) { pthread_mutex_unlock(&cl->thread_lock); continue; } else { cl->thread_active = 0; pthread_mutex_unlock(&cl->thread_lock); break; } } cl->thread_active = 0; cl->work_mbuf = NULL; // Prevent free_client from freeing mbuf (->work_mbuf) NULLFREE(mbuf); pthread_exit(NULL); return NULL; }
/** * adds a job to the job queue * if ptr should be free() after use, set len to the size * else set size to 0 **/ int32_t add_job(struct s_client *cl, enum actions action, void *ptr, int32_t len) { if(!cl || cl->kill) { if(!cl) { cs_log("WARNING: add_job failed. Client killed!"); } // Ignore jobs for killed clients if(len && ptr) { NULLFREE(ptr); } return 0; } #ifdef CS_CACHEEX // Avoid full running queues: if(action == ACTION_CACHE_PUSH_OUT && ll_count(cl->joblist) > 2000) { cs_debug_mask(D_TRACE, "WARNING: job queue %s %s has more than 2000 jobs! count=%d, dropped!", cl->typ == 'c' ? "client" : "reader", username(cl), ll_count(cl->joblist)); if(len && ptr) { NULLFREE(ptr); } // Thread down??? pthread_mutex_lock(&cl->thread_lock); if(cl && !cl->kill && cl->thread && cl->thread_active) { // Just test for invalid thread id: if(pthread_detach(cl->thread) == ESRCH) { cl->thread_active = 0; cs_debug_mask(D_TRACE, "WARNING: %s %s thread died!", cl->typ == 'c' ? "client" : "reader", username(cl)); } } pthread_mutex_unlock(&cl->thread_lock); return 0; } #endif struct job_data *data; if(!cs_malloc(&data, sizeof(struct job_data))) { if(len && ptr) { NULLFREE(ptr); } return 0; } data->action = action; data->ptr = ptr; data->cl = cl; data->len = len; cs_ftime(&data->time); pthread_mutex_lock(&cl->thread_lock); if(cl && !cl->kill && cl->thread_active) { if(!cl->joblist) { cl->joblist = ll_create("joblist"); } ll_append(cl->joblist, data); if(cl->thread_active == 2) { pthread_kill(cl->thread, OSCAM_SIGNAL_WAKEUP); } pthread_mutex_unlock(&cl->thread_lock); cs_debug_mask(D_TRACE, "add %s job action %d queue length %d %s", action > ACTION_CLIENT_FIRST ? "client" : "reader", action, ll_count(cl->joblist), username(cl)); return 1; } pthread_attr_t attr; pthread_attr_init(&attr); /* pcsc doesn't like this either; segfaults on x86, x86_64 */ struct s_reader *rdr = cl->reader; if(cl->typ != 'r' || !rdr || rdr->typ != R_PCSC) { pthread_attr_setstacksize(&attr, PTHREAD_STACK_SIZE); } if(action != ACTION_READER_CHECK_HEALTH) { cs_debug_mask(D_TRACE, "start %s thread action %d", action > ACTION_CLIENT_FIRST ? "client" : "reader", action); } int32_t ret = pthread_create(&cl->thread, &attr, work_thread, (void *)data); if(ret) { cs_log("ERROR: can't create thread for %s (errno=%d %s)", action > ACTION_CLIENT_FIRST ? "client" : "reader", ret, strerror(ret)); free_job_data(data); } else { pthread_detach(cl->thread); } pthread_attr_destroy(&attr); cl->thread_active = 1; pthread_mutex_unlock(&cl->thread_lock); return 1; }
/** * adds a job to the job queue * if ptr should be free() after use, set len to the size * else set size to 0 **/ int32_t add_job(struct s_client *cl, enum actions action, void *ptr, int32_t len) { if(!cl || cl->kill) { if(!cl) { cs_log("WARNING: add_job failed. Client killed!"); } // Ignore jobs for killed clients if(len && ptr) { NULLFREE(ptr); } return 0; } if(action == ACTION_CACHE_PUSH_OUT && cacheex_check_queue_length(cl)) { if(len && ptr) { NULLFREE(ptr); } return 0; } struct job_data *data; if(!cs_malloc(&data, sizeof(struct job_data))) { if(len && ptr) { NULLFREE(ptr); } return 0; } data->action = action; data->ptr = ptr; data->cl = cl; data->len = len; cs_ftime(&data->time); SAFE_MUTEX_LOCK(&cl->thread_lock); if(cl && !cl->kill && cl->thread_active) { if(!cl->joblist) { cl->joblist = ll_create("joblist"); } ll_append(cl->joblist, data); if(cl->thread_active == 2) { pthread_kill(cl->thread, OSCAM_SIGNAL_WAKEUP); } SAFE_MUTEX_UNLOCK(&cl->thread_lock); cs_log_dbg(D_TRACE, "add %s job action %d queue length %d %s", action > ACTION_CLIENT_FIRST ? "client" : "reader", action, ll_count(cl->joblist), username(cl)); return 1; } /* pcsc doesn't like this; segfaults on x86, x86_64 */ int8_t modify_stacksize = 0; struct s_reader *rdr = cl->reader; if(cl->typ != 'r' || !rdr || rdr->typ != R_PCSC) { modify_stacksize = 1; } if(action != ACTION_READER_CHECK_HEALTH) { cs_log_dbg(D_TRACE, "start %s thread action %d", action > ACTION_CLIENT_FIRST ? "client" : "reader", action); } int32_t ret = start_thread("client work", work_thread, (void *)data, &cl->thread, 1, modify_stacksize); if(ret) { cs_log("ERROR: can't create thread for %s (errno=%d %s)", action > ACTION_CLIENT_FIRST ? "client" : "reader", ret, strerror(ret)); free_job_data(data); } cl->thread_active = 1; SAFE_MUTEX_UNLOCK(&cl->thread_lock); return 1; }
int32_t init_srvid(void)
{
	int8_t new_syntax = 1;
	FILE *fp = open_config_file("oscam.srvid2");
	if(!fp)
	{
		fp = open_config_file(cs_srid);
		if(fp)
			{ new_syntax = 0; }
	}

	if(!fp)
	{
		fp = create_config_file("oscam.srvid2");
		if(fp)
			{ flush_config_file(fp, "oscam.srvid2"); }
		return 0;
	}

	int32_t nr = 0, i, j;
	char *payload, *saveptr1 = NULL, *saveptr2 = NULL, *token;
	const char *tmp;
	if(!cs_malloc(&token, MAXLINESIZE))
		{ return 0; }

	struct s_srvid *srvid = NULL, *new_cfg_srvid[16], *last_srvid[16];

	// A cache for strings within srvids. A checksum is calculated which is the start point in the array (some kind of primitive hash algo).
	// From this point, a sequential search is done. This greatly reduces the amount of string comparisons.
	const char **stringcache[1024];
	int32_t allocated[1024] = { 0 };
	int32_t used[1024] = { 0 };

	struct timeb ts, te;
	cs_ftime(&ts);

	memset(last_srvid, 0, sizeof(last_srvid));
	memset(new_cfg_srvid, 0, sizeof(new_cfg_srvid));

	while(fgets(token, MAXLINESIZE, fp))
	{
		int32_t l, len = 0, len2, srvidtmp;
		uint32_t k;
		uint32_t pos;
		char *srvidasc, *prov;
		tmp = trim(token);

		if(tmp[0] == '#') { continue; }
		if((l = strlen(tmp)) < 6) { continue; }

		if(!(srvidasc = strchr(token, ':'))) { continue; }
		if(!(payload = strchr(token, '|'))) { continue; }
		*payload++ = '\0';

		if(!cs_malloc(&srvid, sizeof(struct s_srvid)))
		{
			NULLFREE(token);
			fclose(fp);
			return (1);
		}

		char tmptxt[128];
		int32_t offset[4] = { -1, -1, -1, -1 };
		char *ptr1 = NULL, *ptr2 = NULL;
		const char *searchptr[4] = { NULL, NULL, NULL, NULL };
		const char **ptrs[4] = { &srvid->prov, &srvid->name, &srvid->type, &srvid->desc };
		uint32_t max_payload_length = MAXLINESIZE - (payload - token);

		if(new_syntax)
		{
			ptrs[0] = &srvid->name;
			ptrs[1] = &srvid->type;
			ptrs[2] = &srvid->desc;
			ptrs[3] = &srvid->prov;
		}

		// allow empty strings as "||"
		if(payload[0] == '|' && (strlen(payload) + 2 < max_payload_length))
		{
			memmove(payload + 1, payload, strlen(payload) + 1);
			payload[0] = ' ';
		}

		for(k = 1; ((k < max_payload_length) && (payload[k] != '\0')); k++)
		{
			if(payload[k - 1] == '|' && payload[k] == '|')
			{
				if(strlen(payload + k) + 2 < max_payload_length - k)
				{
					memmove(payload + k + 1, payload + k, strlen(payload + k) + 1);
					payload[k] = ' ';
				}
				else
				{
					break;
				}
			}
		}

		for(i = 0, ptr1 = strtok_r(payload, "|", &saveptr1); ptr1 && (i < 4); ptr1 = strtok_r(NULL, "|", &saveptr1), ++i)
		{
			// check if string is in cache
			len2 = strlen(ptr1);
			pos = 0;
			for(j = 0; j < len2; ++j)
				{ pos += (uint8_t)ptr1[j]; }
			pos = pos % 1024;

			for(j = 0; j < used[pos]; ++j)
			{
				if(!strcmp(stringcache[pos][j], ptr1))
				{
					searchptr[i] = stringcache[pos][j];
					break;
				}
			}
			if(searchptr[i]) { continue; }

			offset[i] = len;
			cs_strncpy(tmptxt + len, trim(ptr1), sizeof(tmptxt) - len);
			len += strlen(ptr1) + 1;
		}

		char *tmpptr = NULL;
		if(len > 0 && !cs_malloc(&tmpptr, len))
			{ continue; }

		srvid->data = tmpptr;
		if(len > 0) { memcpy(tmpptr, tmptxt, len); }

		for(i = 0; i < 4; i++)
		{
			if(searchptr[i])
			{
				*ptrs[i] = searchptr[i];
				continue;
			}

			if(offset[i] > -1)
			{
				*ptrs[i] = tmpptr + offset[i];

				// store string in stringcache
				tmp = *ptrs[i];
				len2 = strlen(tmp);
				pos = 0;
				for(j = 0; j < len2; ++j)
					{ pos += (uint8_t)tmp[j]; }
				pos = pos % 1024;

				if(used[pos] >= allocated[pos])
				{
					if(allocated[pos] == 0)
					{
						if(!cs_malloc(&stringcache[pos], 16 * sizeof(char *)))
							{ break; }
					}
					else
					{
						if(!cs_realloc(&stringcache[pos], (allocated[pos] + 16) * sizeof(char *)))
							{ break; }
					}
					allocated[pos] += 16;
				}
				stringcache[pos][used[pos]] = tmp;
				used[pos] += 1;
			}
		}

		*srvidasc++ = '\0';
		if(new_syntax)
			{ srvidtmp = dyn_word_atob(token) & 0xFFFF; }
		else
			{ srvidtmp = dyn_word_atob(srvidasc) & 0xFFFF; }

		if(srvidtmp < 0)
		{
			NULLFREE(tmpptr);
			NULLFREE(srvid);
			continue;
		}
		else
		{
			srvid->srvid = srvidtmp;
		}

		srvid->ncaid = 0;
		for(i = 0, ptr1 = strtok_r(new_syntax ? srvidasc : token, ",", &saveptr1); (ptr1); ptr1 = strtok_r(NULL, ",", &saveptr1), i++)
		{
			srvid->ncaid++;
		}

		if(!cs_malloc(&srvid->caid, sizeof(struct s_srvid_caid) * srvid->ncaid))
		{
			NULLFREE(tmpptr);
			NULLFREE(srvid);
			return 0;
		}

		ptr1 = new_syntax ? srvidasc : token;
		for(i = 0; i < srvid->ncaid; i++)
		{
			prov = strchr(ptr1, '@');
			srvid->caid[i].nprovid = 0;

			if(prov)
			{
				if(prov[1] != '\0')
				{
					for(j = 0, ptr2 = strtok_r(prov + 1, "@", &saveptr2); (ptr2); ptr2 = strtok_r(NULL, "@", &saveptr2), j++)
					{
						srvid->caid[i].nprovid++;
					}

					if(!cs_malloc(&srvid->caid[i].provid, sizeof(uint32_t) * srvid->caid[i].nprovid))
					{
						for(j = 0; j < i; j++)
							{ NULLFREE(srvid->caid[j].provid); }
						NULLFREE(srvid->caid);
						NULLFREE(tmpptr);
						NULLFREE(srvid);
						return 0;
					}

					ptr2 = prov + 1;
					for(j = 0; j < srvid->caid[i].nprovid; j++)
					{
						srvid->caid[i].provid[j] = dyn_word_atob(ptr2) & 0xFFFFFF;
						ptr2 = ptr2 + strlen(ptr2) + 1;
					}
				}
				else
				{
					ptr2 = prov + 2;
				}
				prov[0] = '\0';
			}

			srvid->caid[i].caid = dyn_word_atob(ptr1) & 0xFFFF;
			if(prov)
				{ ptr1 = ptr2; }
			else
				{ ptr1 = ptr1 + strlen(ptr1) + 1; }
		}

		nr++;

		if(new_cfg_srvid[srvid->srvid >> 12])
			{ last_srvid[srvid->srvid >> 12]->next = srvid; }
		else
			{ new_cfg_srvid[srvid->srvid >> 12] = srvid; }

		last_srvid[srvid->srvid >> 12] = srvid;
	}
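Both srvid readers dedupe the prov/name/type/desc strings through a 1024-bucket cache keyed by a primitive hash: the byte values of the string are summed and reduced modulo 1024, and a linear strcmp() scan inside the bucket decides whether the string is already stored. A minimal standalone sketch of that hash is shown below; bucket_of is an illustrative helper written for this sketch, not a function from the OSCam sources.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Same primitive hash the srvid string cache uses:
 * sum of the bytes, reduced modulo the number of buckets. */
static uint32_t bucket_of(const char *s, uint32_t nbuckets)
{
	uint32_t pos = 0;
	size_t i, len = strlen(s);
	for(i = 0; i < len; ++i)
		{ pos += (uint8_t)s[i]; }
	return pos % nbuckets;
}

int main(void)
{
	/* Strings that only differ in byte order land in the same bucket,
	 * which is why the sequential strcmp() within the bucket is still needed. */
	printf("%u %u %u\n",
			(unsigned)bucket_of("Sky Sports", 1024),
			(unsigned)bucket_of("Sports Sky", 1024),
			(unsigned)bucket_of("ORF1", 1024));
	return 0;
}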
int32_t init_srvid(void)
{
	FILE *fp = open_config_file(cs_srid);
	if (!fp)
		return 0;

	int32_t nr = 0, i;
	char *payload, *tmp, *saveptr1 = NULL, *token;
	if (!cs_malloc(&token, MAXLINESIZE))
		return 0;

	struct s_srvid *srvid = NULL, *new_cfg_srvid[16], *last_srvid[16];

	// A cache for strings within srvids. A checksum is calculated which is the start point in the array (some kind of primitive hash algo).
	// From this point, a sequential search is done. This greatly reduces the amount of string comparisons.
	char **stringcache[1024];
	int32_t allocated[1024] = { 0 };
	int32_t used[1024] = { 0 };

	struct timeb ts, te;
	cs_ftime(&ts);

	memset(last_srvid, 0, sizeof(last_srvid));
	memset(new_cfg_srvid, 0, sizeof(new_cfg_srvid));

	while (fgets(token, MAXLINESIZE, fp))
	{
		int32_t l, j, len = 0, len2, srvidtmp;
		uint32_t pos;
		char *srvidasc;
		tmp = trim(token);

		if (tmp[0] == '#') continue;
		if ((l = strlen(tmp)) < 6) continue;

		if (!(srvidasc = strchr(token, ':'))) continue;
		if (!(payload = strchr(token, '|'))) continue;
		*payload++ = '\0';

		if (!cs_malloc(&srvid, sizeof(struct s_srvid)))
		{
			free(token);
			fclose(fp);
			return(1);
		}

		char tmptxt[128];
		int32_t offset[4] = { -1, -1, -1, -1 };
		char *ptr1, *searchptr[4] = { NULL, NULL, NULL, NULL };
		char **ptrs[4] = { &srvid->prov, &srvid->name, &srvid->type, &srvid->desc };

		for (i = 0, ptr1 = strtok_r(payload, "|", &saveptr1); ptr1 && (i < 4); ptr1 = strtok_r(NULL, "|", &saveptr1), ++i)
		{
			// check if string is in cache
			len2 = strlen(ptr1);
			pos = 0;
			for (j = 0; j < len2; ++j) pos += (uint8_t)ptr1[j];
			pos = pos % 1024;

			for (j = 0; j < used[pos]; ++j)
			{
				if (!strcmp(stringcache[pos][j], ptr1))
				{
					searchptr[i] = stringcache[pos][j];
					break;
				}
			}
			if (searchptr[i]) continue;

			offset[i] = len;
			cs_strncpy(tmptxt + len, trim(ptr1), sizeof(tmptxt) - len);
			len += strlen(ptr1) + 1;
		}

		char *tmpptr = NULL;
		if (len > 0 && !cs_malloc(&tmpptr, len))
			continue;

		srvid->data = tmpptr;
		if (len > 0) memcpy(tmpptr, tmptxt, len);

		for (i = 0; i < 4; i++)
		{
			if (searchptr[i])
			{
				*ptrs[i] = searchptr[i];
				continue;
			}

			if (offset[i] > -1)
			{
				*ptrs[i] = tmpptr + offset[i];

				// store string in stringcache
				tmp = *ptrs[i];
				len2 = strlen(tmp);
				pos = 0;
				for (j = 0; j < len2; ++j) pos += (uint8_t)tmp[j];
				pos = pos % 1024;

				if (used[pos] >= allocated[pos])
				{
					if (allocated[pos] == 0)
					{
						if (!cs_malloc(&stringcache[pos], 16 * sizeof(char *))) break;
					}
					else
					{
						if (!cs_realloc(&stringcache[pos], (allocated[pos] + 16) * sizeof(char *))) break;
					}
					allocated[pos] += 16;
				}
				stringcache[pos][used[pos]] = tmp;
				used[pos] += 1;
			}
		}

		*srvidasc++ = '\0';
		srvidtmp = dyn_word_atob(srvidasc) & 0xFFFF;
		//printf("srvid %s - %d\n", srvidasc, srvid->srvid);
		if (srvidtmp < 0)
		{
			free(tmpptr);
			free(srvid);
			continue;
		}
		else
			srvid->srvid = srvidtmp;

		srvid->ncaid = 0;
		for (i = 0, ptr1 = strtok_r(token, ",", &saveptr1); (ptr1) && (i < 10); ptr1 = strtok_r(NULL, ",", &saveptr1), i++)
		{
			srvid->caid[i] = dyn_word_atob(ptr1);
			srvid->ncaid = i + 1;
			//cs_debug_mask(D_CLIENT, "ld caid: %04X srvid: %04X Prov: %s Chan: %s", srvid->caid[i], srvid->srvid, srvid->prov, srvid->name);
		}
		nr++;

		if (new_cfg_srvid[srvid->srvid >> 12])
			last_srvid[srvid->srvid >> 12]->next = srvid;
		else
			new_cfg_srvid[srvid->srvid >> 12] = srvid;

		last_srvid[srvid->srvid >> 12] = srvid;
	}
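The older reader above only accepts a flat, comma-separated caid list (at most 10 entries, no providers), whereas the oscam.srvid2 parser further splits each caid token at '@' into optional provider ids. The sketch below reproduces that caid@provid@provid splitting in isolation, using plain strtoul() instead of dyn_word_atob(); the sample line and all names are invented for illustration.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Illustrative only: splits a srvid2-style caid list such as "1702@000000@000080,1833"
 * into caids and their optional '@'-separated provider ids, with the same
 * nested strtok_r pattern the srvid2 parser relies on. */
int main(void)
{
	char line[] = "1702@000000@000080,1833";
	char *saveptr1 = NULL, *saveptr2 = NULL;
	char *caid_tok;

	for(caid_tok = strtok_r(line, ",", &saveptr1); caid_tok; caid_tok = strtok_r(NULL, ",", &saveptr1))
	{
		char *at = strchr(caid_tok, '@');
		if(at) { *at++ = '\0'; } // the caid part ends at the first '@'
		printf("caid %04lX:", strtoul(caid_tok, NULL, 16));

		char *prov_tok;
		for(prov_tok = at ? strtok_r(at, "@", &saveptr2) : NULL; prov_tok; prov_tok = strtok_r(NULL, "@", &saveptr2))
			{ printf(" provid %06lX", strtoul(prov_tok, NULL, 16)); }
		printf("\n");
	}
	return 0;
}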
void add_cache(ECM_REQUEST *er)
{
	if(!er->csp_hash) return;

	ECMHASH *result = NULL;
	CW *cw = NULL;
#ifdef CS_CACHEEX
	bool add_new_cw = false;
#endif

	pthread_rwlock_wrlock(&cache_lock);

	//add csp_hash to cache
	result = find_hash_table(&ht_cache, &er->csp_hash, sizeof(int32_t), &compare_csp_hash);
	if(!result)
	{
		if(cs_malloc(&result, sizeof(ECMHASH)))
		{
			result->csp_hash = er->csp_hash;
			init_hash_table(&result->ht_cw, &result->ll_cw);
			cs_ftime(&result->first_recv_time);
			add_hash_table(&ht_cache, &result->ht_node, &ll_cache, &result->ll_node, result, &result->csp_hash, sizeof(int32_t));
		}
		else
		{
			pthread_rwlock_unlock(&cache_lock);
			cs_log("ERROR: NO added HASH to cache!!");
			return;
		}
	}

	cs_ftime(&result->upd_time); //need to be updated at each cw! We use it for deleting this hash when no more cws arrive inside max_cache_time!

	//add cw to this csp hash
	cw = find_hash_table(&result->ht_cw, er->cw, sizeof(er->cw), &compare_cw);
	if(!cw)
	{
		if(count_hash_table(&result->ht_cw) >= 10) //max 10 different cws stored
		{
			pthread_rwlock_unlock(&cache_lock);
			return;
		}

		while(1)
		{
			if(cs_malloc(&cw, sizeof(CW)))
			{
				memcpy(cw->cw, er->cw, sizeof(er->cw));
				cw->odd_even = get_odd_even(er);
				cw->cwc_cycletime = er->cwc_cycletime;
				cw->cwc_next_cw_cycle = er->cwc_next_cw_cycle;
				cw->count = 0;
				cw->csp = 0;
				cw->cacheex = 0;
				cw->localcards = 0;
				cw->proxy = 0;
				cw->grp = 0;
				cw->caid = er->caid;
				cw->prid = er->prid;
				cw->srvid = er->srvid;
				cw->selected_reader = er->selected_reader;
#ifdef CS_CACHEEX
				cw->cacheex_src = er->cacheex_src;
#endif
				cw->pushout_client = NULL;

				while(1)
				{
					if (pthread_rwlock_init(&cw->pushout_client_lock, NULL) == 0)
						break;
					cs_log("Error creating lock pushout_client_lock!");
					cs_sleepms(1);
				}

				add_hash_table(&result->ht_cw, &cw->ht_node, &result->ll_cw, &cw->ll_node, cw, cw->cw, sizeof(er->cw));
#ifdef CS_CACHEEX
				add_new_cw = true;
#endif
				break;
			}

			cs_log("ERROR: NO added CW to cache!! Re-trying...");
			cs_sleepms(1);
		}
	}

	//update if answered from csp/cacheex/local_proxy
	if(er->from_cacheex) cw->cacheex = 1;
	if(er->from_csp) cw->csp = 1;
#ifdef CS_CACHEEX
	if(!er->cacheex_src)
	{
#endif
		if(is_localreader(er->selected_reader, er))
			cw->localcards = 1;
		else
			cw->proxy = 1;
#ifdef CS_CACHEEX
	}
#endif

	//always update group and counter
	cw->grp |= er->grp;
	cw->count++;

	//sort cw_list by counter (DESC order)
	if(cw->count > 1)
		sort_list(&result->ll_cw, count_sort);

	pthread_rwlock_unlock(&cache_lock);

#ifdef CS_CACHEEX
	er->cw_cache = cw;
	cacheex_cache_push(er);

	//cacheex debug log lines and cw diff stuff
	if(check_client(er->cacheex_src))
	{
		if(add_new_cw)
		{
			debug_ecm(D_CACHEEX|D_CSP, "got pushed ECM %s from %s", buf, er->from_csp ? "csp" : username(er->cacheex_src));

			CW *cw_first = get_first_cw(result, er);
			if(er && cw_first)
			{
				//compare er cw with mostly counted cached cw
				if(memcmp(er->cw, cw_first->cw, sizeof(er->cw)) != 0)
				{
					er->cacheex_src->cwcacheexerrcw++;
					if (er->cacheex_src->account)
						er->cacheex_src->account->cwcacheexerrcw++;

					if (((0x0200|0x0800) & cs_dblevel)) //avoid useless operations if debug is not enabled
					{
						char cw1[16*3+2], cw2[16*3+2];
						cs_hexdump(0, er->cw, 16, cw1, sizeof(cw1));
						cs_hexdump(0, cw_first->cw, 16, cw2, sizeof(cw2));

						char ip1[20] = "", ip2[20] = "";
						if (check_client(er->cacheex_src))
							cs_strncpy(ip1, cs_inet_ntoa(er->cacheex_src->ip), sizeof(ip1));
						if (check_client(cw_first->cacheex_src))
							cs_strncpy(ip2, cs_inet_ntoa(cw_first->cacheex_src->ip), sizeof(ip2));
						else if (cw_first->selected_reader && check_client(cw_first->selected_reader->client))
							cs_strncpy(ip2, cs_inet_ntoa(cw_first->selected_reader->client->ip), sizeof(ip2));

						debug_ecm(D_CACHEEX|D_CSP, "WARNING: Different CWs %s from %s(%s)<>%s(%s): %s<>%s ", buf,
								er->from_csp ? "csp" : username(er->cacheex_src), ip1,
								check_client(cw_first->cacheex_src) ? username(cw_first->cacheex_src) : (cw_first->selected_reader ? cw_first->selected_reader->label : "unknown/csp"), ip2,
								cw1, cw2);
					}
				}
			}
		}
		else
		{
			debug_ecm(D_CACHEEX|D_CSP, "got duplicate pushed ECM %s from %s", buf, er->from_csp ? "csp" : username(er->cacheex_src));
		}
	}
#endif
}