void gbox_write_share_cards_info(void) { uint16_t card_count_shared = 0; uint16_t card_count_expired = 0; char *fext = FILE_SHARED_CARDS_INFO; char *fname = get_gbox_tmp_fname(fext); FILE *fhandle_shared; fhandle_shared = fopen(fname, "w"); if(!fhandle_shared) { cs_log("Couldn't open %s: %s", fname, strerror(errno)); return; } struct gbox_card *card; cs_readlock(__func__, &gbox_cards_lock); LL_ITER it = ll_iter_create(gbox_cards); while((card = ll_iter_next(&it))) { if (card->type == GBOX_CARD_TYPE_GBOX) { fprintf(fhandle_shared, "CardID %d at %s Card %08X Sl:%d Lev:%1d dist:%1d id:%04X\n", card_count_shared, card->origin_peer->hostname, card->caprovid, card->id.slot, card->lvl, card->dist, card->id.peer); card_count_shared++; } } cs_readunlock(__func__, &gbox_cards_lock); fclose(fhandle_shared); fext = FILE_BACKUP_CARDS_INFO; fname = get_gbox_tmp_fname(fext); FILE *fhandle_expired; fhandle_expired = fopen(fname, "w"); if(!fhandle_expired) { cs_log("Couldn't open %s: %s", fname, strerror(errno)); return; } cs_readlock(__func__, &gbox_cards_lock); LL_ITER it2 = ll_iter_create(gbox_backup_cards); while((card = ll_iter_next(&it2))) { if (card->type == GBOX_CARD_TYPE_GBOX) { fprintf(fhandle_expired, "CardID %2d at %s Card %08X Sl:%2d Lev:%1d dist:%1d id:%04X\n", card_count_expired, card->origin_peer->hostname, card->caprovid, card->id.slot, card->lvl, card->dist, card->id.peer); card_count_expired++; } } cs_readunlock(__func__, &gbox_cards_lock); fclose(fhandle_expired); return; }
static void * reader_check(void) { struct s_client *cl; struct s_reader *rdr; set_thread_name(__func__); pthread_mutex_init(&reader_check_sleep_cond_mutex, NULL); pthread_cond_init(&reader_check_sleep_cond, NULL); while (!exit_oscam) { for (cl=first_client->next; cl ; cl=cl->next) { if (!cl->thread_active) client_check_status(cl); } cs_readlock(&readerlist_lock); for (rdr=first_active_reader; rdr; rdr=rdr->next) { if (rdr->enable) { cl = rdr->client; if (!cl || cl->kill) restart_cardreader(rdr, 0); else if (!cl->thread_active) client_check_status(cl); } } cs_readunlock(&readerlist_lock); sleepms_on_cond(&reader_check_sleep_cond, &reader_check_sleep_cond_mutex, 1000); } return NULL; }
static void * card_poll(void) { struct s_client *cl; struct s_reader *rdr; pthread_mutex_t card_poll_sleep_cond_mutex; SAFE_MUTEX_INIT(&card_poll_sleep_cond_mutex, NULL); SAFE_COND_INIT(&card_poll_sleep_cond, NULL); set_thread_name(__func__); while (!exit_oscam) { cs_readlock(__func__, &readerlist_lock); for (rdr=first_active_reader; rdr; rdr=rdr->next) { if (rdr->enable && rdr->card_status == CARD_INSERTED) { cl = rdr->client; if (cl && !cl->kill) { add_job(cl, ACTION_READER_POLL_STATUS, 0, 0); } } } cs_readunlock(__func__, &readerlist_lock); struct timespec ts; struct timeval tv; gettimeofday(&tv, NULL); ts.tv_sec = tv.tv_sec; ts.tv_nsec = tv.tv_usec * 1000; ts.tv_sec += 1; SAFE_MUTEX_LOCK(&card_poll_sleep_cond_mutex); SAFE_COND_TIMEDWAIT(&card_poll_sleep_cond, &card_poll_sleep_cond_mutex, &ts); // sleep on card_poll_sleep_cond SAFE_MUTEX_UNLOCK(&card_poll_sleep_cond_mutex); } return NULL; }
void gbox_write_local_cards_info(void) { uint16_t card_count_local = 0; char *fext = FILE_LOCAL_CARDS_INFO; char *fname = get_gbox_tmp_fname(fext); FILE *fhandle_local; fhandle_local = fopen(fname, "w"); if(!fhandle_local) { cs_log("Couldn't open %s: %s", fname, strerror(errno)); return; } struct gbox_card *card; cs_readlock(__func__, &gbox_cards_lock); LL_ITER it = ll_iter_create(gbox_cards); while((card = ll_iter_next(&it))) { switch (card->type) { case GBOX_CARD_TYPE_GBOX: break; case GBOX_CARD_TYPE_LOCAL: fprintf(fhandle_local, "CardID:%2d %s %08X Sl:%2d id:%04X\n", card_count_local, "Local_Card", card->caprovid, card->id.slot, card->id.peer); card_count_local++; break; case GBOX_CARD_TYPE_BETUN: fprintf(fhandle_local, "CardID:%2d %s %08X Sl:%2d id:%04X\n", card_count_local, "Betun_Card", card->caprovid, card->id.slot, card->id.peer); card_count_local++; break; case GBOX_CARD_TYPE_CCCAM: fprintf(fhandle_local, "CardID:%2d %s %08X Sl:%2d id:%04X\n", card_count_local, "CCcam_Card", card->caprovid, card->id.slot, card->id.peer); card_count_local++; break; case GBOX_CARD_TYPE_PROXY: fprintf(fhandle_local, "CardID:%2d %s %08X Sl:%2d id:%04X\n", card_count_local, "Proxy_Card", card->caprovid, card->id.slot, card->id.peer); card_count_local++; break; default: cs_log("Invalid card type: %d in gbox_write_cards_info", card->type); break; } } cs_readunlock(__func__, &gbox_cards_lock); fclose(fhandle_local); }
void gbox_write_stats(void) { int32_t card_count = 0; struct gbox_good_srvid *srvid_good = NULL; struct gbox_bad_srvid *srvid_bad = NULL; char *fext = FILE_STATS; char *fname = get_gbox_tmp_fname(fext); FILE *fhandle; fhandle = fopen(fname, "w"); if(!fhandle) { cs_log("Couldn't open %s: %s", fname, strerror(errno)); return; } struct gbox_card *card; cs_readlock(__func__, &gbox_cards_lock); LL_ITER it = ll_iter_create(gbox_cards); while((card = ll_iter_next(&it))) { if (card->type == GBOX_CARD_TYPE_GBOX) { fprintf(fhandle, "CardID %4d Card %08X id:%04X #CWs:%d AVGtime:%d ms\n", card_count, card->caprovid, card->id.peer, card->no_cws_returned, card->average_cw_time); fprintf(fhandle, "Good SIDs:\n"); LL_ITER it2 = ll_iter_create(card->goodsids); while((srvid_good = ll_iter_next(&it2))) { fprintf(fhandle, "%04X\n", srvid_good->srvid.sid); } fprintf(fhandle, "Bad SIDs:\n"); it2 = ll_iter_create(card->badsids); while((srvid_bad = ll_iter_next(&it2))) { fprintf(fhandle, "%04X #%d\n", srvid_bad->srvid.sid, srvid_bad->bad_strikes); } card_count++; } } // end of while ll_iter_next cs_readunlock(__func__, &gbox_cards_lock); fclose(fhandle); return; }
/* Cache-checker thread: every 10 ms, scan the pending ECM request list
 * (ecmcwcache) for still-unanswered requests and try to answer them from the
 * CW cache. When CACHEEX is compiled in, it also maintains the hitcache:
 * adding an entry when a cacheex-sourced CW matches a differing request, or
 * removing it when the CW arrived after cacheex_wait_time expired.
 * Runs forever; takes ecmcache_lock as reader for each scan. */
static void *chkcache_process(void)
{
	set_thread_name(__func__);

	time_t timeout;
	struct ecm_request_t *er, *ecm;
#ifdef CS_CACHEEX
	uint8_t add_hitcache_er;
	struct s_reader *cl_rdr;
	struct s_reader *rdr;
	struct s_ecm_answer *ea;
	struct s_client *cex_src=NULL;
#endif
	struct s_write_from_cache *wfc=NULL;

	while(1)
	{
		cs_readlock(&ecmcache_lock);
		for(er = ecmcwcache; er; er = er->next)
		{
			// List is newest-first: once an entry is older than the client
			// timeout (plus slack), everything after it is too old — stop.
			timeout = time(NULL)-((cfg.ctimeout+500)/1000+1);
			if(er->tps.time < timeout)
			{ break; }

			if(er->rc<E_UNHANDLED || er->readers_timeout_check)   //already answered
			{ continue; }

			//******** CHECK IF FOUND ECM IN CACHE
			// NOTE(review): the NULLFREE(ecm) calls below suggest check_cache
			// returns an ecm the caller owns — confirm against check_cache.
			ecm = check_cache(er, er->client);
			if(ecm)   //found in cache
			{
#ifdef CS_CACHEEX
				//check for add_hitcache
				if(ecm->cacheex_src)   //cw from cacheex
				{
					if((er->cacheex_wait_time && !er->cacheex_wait_time_expired) || !er->cacheex_wait_time)   //only when no wait_time expires (or not wait_time)
					{
						//add_hitcache already called, but we check if we have to call it for these (er) caid|prid|srvid
						if(ecm->prid!=er->prid || ecm->srvid!=er->srvid)
						{
							// Re-validate the cacheex source client before using it.
							cex_src = ecm->cacheex_src && is_valid_client(ecm->cacheex_src) && !ecm->cacheex_src->kill ? ecm->cacheex_src : NULL;   //here we should be sure cex client has not been freed!
							if(cex_src)
							{
								//add_hitcache only if client is really active
								add_hitcache_er=1;
								cl_rdr = cex_src->reader;
								if(cl_rdr && cl_rdr->cacheex.mode == 2)
								{
									// Suppress the hitcache add when the answering
									// cacheex reader is the one we asked ourselves.
									for(ea = er->matching_rdr; ea; ea = ea->next)
									{
										rdr = ea->reader;
										if(cl_rdr == rdr && ((ea->status & REQUEST_ANSWERED) == REQUEST_ANSWERED))
										{
											cs_debug_mask(D_CACHEEX|D_CSP|D_LB,"{client %s, caid %04X, prid %06X, srvid %04X} [CACHEEX] skip ADD self request!", (check_client(er->client)?er->client->account->usr:"******"),er->caid, er->prid, er->srvid);
											add_hitcache_er=0;   //don't add hit cache, reader requested self
										}
									}
								}

								if(add_hitcache_er)
								{
									add_hitcache(cex_src, er);   //USE cacheex client (to get correct group) and ecm from requesting client (to get correct caid|prid|srvid)!!!
								}
							}
						}
					}
					else
					{
						//add_hitcache already called, but we have to remove it because cacheex not coming before wait_time
						if(ecm->prid==er->prid && ecm->srvid==er->srvid)
						{ del_hitcache(ecm); }
					}
				}
				//END check for add_hitcache
#endif
				if(check_client(er->client))
				{
					wfc=NULL;
					if(!cs_malloc(&wfc, sizeof(struct s_write_from_cache)))
					{
						NULLFREE(ecm);
						continue;
					}
					wfc->er_new=er;
					wfc->er_cache=ecm;
					// Hand the cached answer to the client's worker thread;
					// on failure the cached ecm must be released here.
					if(!add_job(er->client, ACTION_ECM_ANSWER_CACHE, wfc, sizeof(struct s_write_from_cache)))   //write_ecm_answer_fromcache
					{
						NULLFREE(ecm);
						continue;
					}
				}
				else
				{ NULLFREE(ecm); }
			}
		}
		cs_readunlock(&ecmcache_lock);

		cs_sleepms(10);
	}
	return NULL;
}
/**
 * cacheex modes:
 *
 * cacheex=1 CACHE PULL:
 * Situation: oscam A reader1 has cacheex=1, oscam B account1 has cacheex=1
 *   oscam A gets a ECM request, reader1 send this request to oscam B, oscam B checks his cache
 *   a. not found in cache: return NOK
 *   a. found in cache: return OK+CW
 *   b. not found in cache, but found pending request: wait max cacheexwaittime and check again
 *   oscam B never requests new ECMs
 *
 *   CW-flow: B->A
 *
 * cacheex=2 CACHE PUSH:
 * Situation: oscam A reader1 has cacheex=2, oscam B account1 has cacheex=2
 *   if oscam B gets a CW, its pushed to oscam A
 *   reader has normal functionality and can request ECMs
 *
 *   Problem: oscam B can only push if oscam A is connected
 *   Problem or feature?: oscam A reader can request ecms from oscam B
 *
 *   CW-flow: B->A
 *
 */
/* Push a freshly found CW to all eligible cacheex peers: first to connected
 * cacheex=2 user clients (and CSP), then in reverse direction over cacheex=3
 * readers. Each candidate must pass the outgoing filter chain (group, loopback
 * by name/ip, drop_csp, caid, ident, srvid) before the push. No-op for
 * requests that were not found (rc >= E_NOTFOUND). */
void cacheex_cache_push(ECM_REQUEST *er)
{
	if(er->rc >= E_NOTFOUND) { return; }

	//cacheex=2 mode: push (server->remote)
	struct s_client *cl;
	cs_readlock(&clientlist_lock);
	for(cl = first_client->next; cl; cl = cl->next)
	{
		if(check_client(cl) && er->cacheex_src != cl)
		{
			if(get_module(cl)->num == R_CSP)   // always send to csp cl
			{
				if(!er->cacheex_src || cfg.csp.allow_reforward) { cacheex_cache_push_to_client(cl, er); }   // but not if the origin was cacheex (might loop)
			}
			else if(cl->typ == 'c' && !cl->dup && cl->account && cl->account->cacheex.mode == 2)   //send cache over user
			{
				if(get_module(cl)->c_cache_push   // cache-push able
						&& (!er->grp || (cl->grp & er->grp))   //Group-check
						/**** OUTGOING FILTER CHECK ***/
						&& (!er->selected_reader || !cacheex_reader(er->selected_reader) || !cfg.block_same_name || strcmp(username(cl), er->selected_reader->label))   //check reader mode-1 loopback by same name
						&& (!er->selected_reader || !cacheex_reader(er->selected_reader) || !cfg.block_same_ip || (check_client(er->selected_reader->client) && !IP_EQUAL(cl->ip, er->selected_reader->client->ip)))   //check reader mode-1 loopback by same ip
						&& (!cl->account->cacheex.drop_csp || checkECMD5(er))   //cacheex_drop_csp-check
						&& chk_ctab(er->caid, &cl->ctab)   //Caid-check
						&& (!checkECMD5(er) || chk_ident_filter(er->caid, er->prid, &cl->ftab))   //Ident-check (not for csp: prid=0 always!)
						&& chk_srvid(cl, er)   //Service-check
				  )
				{
					cacheex_cache_push_to_client(cl, er);
				}
			}
		}
	}
	cs_readunlock(&clientlist_lock);

	//cacheex=3 mode: reverse push (reader->server)
	// NOTE(review): lock order here is readerlist then clientlist — verify
	// this matches the order used elsewhere to avoid deadlock.
	cs_readlock(&readerlist_lock);
	cs_readlock(&clientlist_lock);
	struct s_reader *rdr;
	for(rdr = first_active_reader; rdr; rdr = rdr->next)
	{
		cl = rdr->client;
		if(check_client(cl) && er->cacheex_src != cl && rdr->cacheex.mode == 3)   //send cache over reader
		{
			if(rdr->ph.c_cache_push   // cache-push able
					&& (!er->grp || (rdr->grp & er->grp))   //Group-check
					/**** OUTGOING FILTER CHECK ***/
					&& (!er->selected_reader || !cacheex_reader(er->selected_reader) || !cfg.block_same_name || strcmp(username(cl), er->selected_reader->label))   //check reader mode-1 loopback by same name
					&& (!er->selected_reader || !cacheex_reader(er->selected_reader) || !cfg.block_same_ip || (check_client(er->selected_reader->client) && !IP_EQUAL(cl->ip, er->selected_reader->client->ip)))   //check reader mode-1 loopback by same ip
					&& (!rdr->cacheex.drop_csp || checkECMD5(er))   //cacheex_drop_csp-check
					&& chk_ctab(er->caid, &rdr->ctab)   //Caid-check
					&& (!checkECMD5(er) || chk_ident_filter(er->caid, er->prid, &rdr->ftab))   //Ident-check (not for csp: prid=0 always!)
					&& chk_srvid(cl, er)   //Service-check
			  )
			{
				cacheex_cache_push_to_client(cl, er);
			}
		}
	}
	cs_readunlock(&clientlist_lock);
	cs_readunlock(&readerlist_lock);
}
/* Tear down a gbox cards iterator: release the read lock held since the
 * iterator was created, then hand the iterator itself to the garbage
 * collector (if one was actually allocated). */
void gbox_cards_iter_destroy(GBOX_CARDS_ITER *gci)
{
	cs_readunlock(__func__, &gbox_cards_lock);
	if (!gci)
		return;
	add_garbage(gci);
}