/* This function allows to reinit the in-memory loghistory with a new size.
 *
 * size == 0 disables the history (no buffer allocated); otherwise a new
 * buffer of `size` bytes is allocated before the old one is swapped out.
 * The old buffer is handed to add_garbage() instead of freed directly,
 * because monitor/webif readers access loghist without taking the lock.
 * NOTE(review): the cs_sleepms(20) calls only *reduce* the race window for
 * those lockless readers; they do not eliminate it. */
void cs_reinit_loghist(uint32_t size)
{
    char *tmp = NULL, *tmp2;
    if(size != cfg.loghistorysize)
    {
        // size == 0 means "no history"; otherwise require a successful allocation
        if(size == 0 || cs_malloc(&tmp, size, -1))
        {
            cs_writelock(&loghistory_lock);
            tmp2 = loghist;  // remember old buffer so it can be garbage-collected after the swap
            // On shrinking, the log is not copied and the order is reversed
            if(size < cfg.loghistorysize)
            {
                // Publish the smaller size FIRST so lockless readers never
                // walk past the end of the (smaller) new buffer.
                cfg.loghistorysize = size;
                cs_sleepms(20); // Monitor or webif may be currently outputting the loghistory but don't use locking so we sleep a bit...
                loghistptr = tmp;
                loghist = tmp;
            }
            else
            {
                // Growing: preserve existing content and keep the write
                // cursor at the same relative offset in the new buffer.
                if(loghist)
                {
                    memcpy(tmp, loghist, cfg.loghistorysize);
                    loghistptr = tmp + (loghistptr - loghist);
                }
                else
                    loghistptr = tmp;
                loghist = tmp;
                cs_sleepms(20); // Monitor or webif may be currently outputting the loghistory but don't use locking so we sleep a bit...
                // Publish the larger size LAST, after the data is in place.
                cfg.loghistorysize = size;
            }
            cs_writeunlock(&loghistory_lock);
            if(tmp2 != NULL)
                add_garbage(tmp2);  // deferred free: readers may still hold the old pointer
        }
    }
}
/* Reload the user database and swap it in under the config lock.
 *
 * Counters and session state (cw stats, emm stats, first login, anticasc
 * vars) are carried over from every old account to the new account with the
 * same user name, so a config reload does not reset live statistics.
 * The previous account list is released via init_free_userdb() after the
 * swap; clients are re-bound to the new list by cs_reinit_clients(). */
void cs_accounts_chk(void)
{
    struct s_auth *fresh_list = init_userdb();

    cs_writelock(__func__, &config_lock);

    struct s_auth *stale_list = cfg.account;

    // Carry statistics over: for each old account, find the matching new one.
    struct s_auth *old_acc;
    for(old_acc = cfg.account; old_acc; old_acc = old_acc->next)
    {
        struct s_auth *new_acc;
        for(new_acc = fresh_list; new_acc; new_acc = new_acc->next)
        {
            if(strcmp(old_acc->usr, new_acc->usr) == 0)
            {
                new_acc->cwfound   = old_acc->cwfound;
                new_acc->cwcache   = old_acc->cwcache;
                new_acc->cwnot     = old_acc->cwnot;
                new_acc->cwtun     = old_acc->cwtun;
                new_acc->cwignored = old_acc->cwignored;
                new_acc->cwtout    = old_acc->cwtout;
                new_acc->emmok     = old_acc->emmok;
                new_acc->emmnok    = old_acc->emmnok;
                new_acc->firstlogin = old_acc->firstlogin;
                ac_copy_vars(old_acc, new_acc);  // anticasc state travels with the account
            }
        }
    }

    // Point clients at the new accounts, then publish and free the old list.
    cs_reinit_clients(fresh_list);
    cfg.account = fresh_list;
    init_free_userdb(stale_list);
    ac_clear();

    cs_writeunlock(__func__, &config_lock);
}
/* OpenSSL locking callback (CRYPTO_set_locking_callback).
 *
 * OpenSSL invokes this with CRYPTO_LOCK set in `mode` to acquire lock number
 * `type`, and without it to release. `file`/`line` are the caller's source
 * location, passed by OpenSSL for debugging only. */
static void SSL_locking_function(int32_t mode, int32_t type, const char *file, int32_t line)
{
    // Standard idiom to mark intentionally-unused parameters; replaces the
    // old dead "if(file || line) { return; }" branch, which only existed to
    // silence -Wunused warnings but added a pointless conditional return.
    (void)file;
    (void)line;

    if(mode & CRYPTO_LOCK)
    {
        cs_writelock(&lock_cs[type]);
    }
    else
    {
        cs_writeunlock(&lock_cs[type]);
    }
}
/* Allocate and initialise a new client structure for connection `ip`,
 * assign it a unique tid, and link it into both the flat client list and
 * the hash-bucket list under clientlist_lock.
 *
 * Returns the new client, or NULL on allocation failure.
 * Fix: the list-append loop previously dereferenced first_client->next
 * unconditionally; it now guards against an empty/NULL client list the same
 * way the hashed insert already tolerates empty buckets. */
struct s_client *create_client(IN_ADDR_T ip)
{
    struct s_client *cl;
    if(!cs_malloc(&cl, sizeof(struct s_client)))
    {
        cs_log("max connections reached (out of memory) -> reject client %s", IP_ISSET(ip) ? cs_inet_ntoa(ip) : "with null address");
        return NULL;
    }
    //client part
    IP_ASSIGN(cl->ip, ip);
    cl->account = first_client->account;
    //master part
    pthread_mutex_init(&cl->thread_lock, NULL);
    cl->login = cl->last = time(NULL);
    cl->tid = (uint32_t)(uintptr_t)cl; // Use pointer adress of client as threadid (for monitor and log)
    //Now add new client to the list:
    struct s_client *last;
    cs_writelock(&clientlist_lock);
    if(sizeof(uintptr_t) > 4) // 64bit systems can have collisions because of the cast so lets check if there are some
    {
        int8_t found;
        do
        {
            found = 0;
            for(last = first_client; last; last = last->next)
            {
                if(last->tid == cl->tid)
                {
                    found = 1;
                    break;
                }
            }
            if(found || cl->tid == 0)
            {
                cl->tid = (uint32_t)rand(); // retry with a random tid until unique and non-zero
            }
        }
        while(found || cl->tid == 0);
    }
    // Append to the flat list; guard against a NULL list head instead of
    // blindly dereferencing first_client->next (crash if list is empty).
    for(last = first_client; last && last->next != NULL; last = last->next)
        { ; } //ends with last on the final client
    if(last)
        last->next = cl;
    // Insert at the head of the hash bucket derived from the pointer value.
    int32_t bucket = (uintptr_t)cl / 16 % CS_CLIENT_HASHBUCKETS;
    cl->nexthashed = first_client_hashed[bucket];
    first_client_hashed[bucket] = cl;
#ifdef MODULE_GBOX
    cl->gbox_peer_id = 0;
#endif
    cs_writeunlock(&clientlist_lock);
    return cl;
}
/* Create the gbox card lists and publish the initial checkcode.
 *
 * Sets up the live and backup card linked lists, creates the cards lock,
 * and writes the fixed 7-byte startup checkcode under that lock. */
void init_gbox_cards(void)
{
    // Fixed startup checkcode bytes, written in order into checkcode[0..6].
    static const uint8_t initial_checkcode[7] = { 0x15, 0x30, 0x02, 0x04, 0x19, 0x19, 0x66 };
    int32_t idx;

    gbox_cards = ll_create("gbox.cards");
    gbox_backup_cards = ll_create("gbox.backup.cards");
    cs_lock_create(__func__, &gbox_cards_lock, "gbox_cards_lock", 5000);

    cs_writelock(__func__, &gbox_cards_lock);
    for(idx = 0; idx < 7; idx++)
    {
        checkcode[idx] = initial_checkcode[idx];
    }
    cs_writeunlock(__func__, &gbox_cards_lock);
}
/* Drop-in replacement for readdir_r, as some platforms strip that function
   from their libc and it has known security issues, see
   http://womble.decadent.org.uk/readdir_r-advisory.html
   Returns 0 on success (also at end of directory, with *result == NULL),
   or the errno value readdir() reported. */
int32_t cs_readdir_r(DIR *dirp, struct dirent *entry, struct dirent **result)
{
    /* POSIX says readdir's buffer is not shared between directory streams,
       but readdir itself is not guaranteed thread-safe and some libcs keep
       global state, so serialize every call through one lock. */
    int32_t ret;

    cs_writelock(&readdir_lock);
    errno = 0;
    *result = readdir(dirp);
    ret = errno;  /* snapshot errno before it can be clobbered */
    if(ret == 0 && *result != NULL)
    {
        /* Copy into the caller's buffer so the entry survives the unlock. */
        memcpy(entry, *result, sizeof(struct dirent));
        *result = entry;
    }
    cs_writeunlock(&readdir_lock);
    return ret;
}
/* Starts a thread named nameroutine with the start function startroutine. */ void start_thread(void * startroutine, char * nameroutine) { pthread_t temp; pthread_attr_t attr; pthread_attr_init(&attr); cs_debug_mask(D_TRACE, "starting thread %s", nameroutine); pthread_attr_setstacksize(&attr, PTHREAD_STACK_SIZE); cs_writelock(&system_lock); int32_t ret = pthread_create(&temp, &attr, startroutine, NULL); if (ret) cs_log("ERROR: can't create %s thread (errno=%d %s)", nameroutine, ret, strerror(ret)); else { cs_debug_mask(D_TRACE, "%s thread started", nameroutine); pthread_detach(temp); } pthread_attr_destroy(&attr); cs_writeunlock(&system_lock); }
/* Allocate and initialise a new client structure for connection `ip` and
 * link it into both the flat client list and the pointer-hash bucket list
 * under clientlist_lock.
 *
 * Returns the new client, or NULL when allocation fails. */
struct s_client *create_client(IN_ADDR_T ip)
{
    struct s_client *cl;

    if(!cs_malloc(&cl, sizeof(struct s_client)))
    {
        cs_log("max connections reached (out of memory) -> reject client %s", IP_ISSET(ip) ? cs_inet_ntoa(ip) : "with null address");
        return NULL;
    }

    // client part
    IP_ASSIGN(cl->ip, ip);
    cl->account = first_client->account;

    // master part
    SAFE_MUTEX_INIT(&cl->thread_lock, NULL);
    cl->login = cl->last = time(NULL);
    cl->tid = (uint32_t)rand();

    // Link the client into both bookkeeping structures under the list lock.
    cs_writelock(__func__, &clientlist_lock);

    // Walk to the tail of the flat list and append (tolerates an empty list).
    struct s_client *tail = first_client;
    while(tail && tail->next)
    {
        tail = tail->next;
    }
    if(tail)
    {
        tail->next = cl;
    }

    // Head-insert into the hash bucket derived from the pointer value.
    int32_t hash_idx = (uintptr_t)cl / 16 % CS_CLIENT_HASHBUCKETS;
    cl->nexthashed = first_client_hashed[hash_idx];
    first_client_hashed[hash_idx] = cl;

    cs_writeunlock(__func__, &clientlist_lock);
    return cl;
}
/* Mark duplicate logins of the same user as "fake" according to the
 * account's uniq setting, optionally dropping one side of the duplicate.
 *
 * Uniq = 1: only one connection per user
 *
 * Uniq = 2: set (new connected) user only to fake if source
 *           ip is different (e.g. for newcamd clients with
 *           different CAID's -> Ports)
 *
 * Uniq = 3: only one connection per user, but only the last
 *           login will survive (old mpcs behavior)
 *
 * Uniq = 4: set user only to fake if source ip is
 *           different, but only the last login will survive
 *
 * NOTE(review): when cfg.dropdups is set, fakeuser_lock is temporarily
 * released around kill_thread()/cs_disconnect_client(); the list is then
 * re-walked from the current node after relocking — presumably safe because
 * clients are garbage-collected, but confirm against list-removal code. */
static void cs_fake_client(struct s_client *client, char *usr, int32_t uniq, IN_ADDR_T ip)
{
    struct s_client *cl;
    struct s_auth *account;
    cs_writelock(&fakeuser_lock);
    for (cl = first_client->next; cl; cl = cl->next)
    {
        account = cl->account;
        // Candidate: another live, non-fake 'c' client logged in with the
        // same user name; uniq odd means "ignore IP", even means "only if
        // the source IP differs".
        if (cl != client && cl->typ == 'c' && !cl->dup && account && streq(account->usr, usr)
            && uniq < 5 && ((uniq % 2) || !IP_EQUAL(cl->ip, ip)))
        {
            char buf[20];
            if (uniq == 3 || uniq == 4)
            {
                // Last login wins: fake the OLD (previous) client.
                cl->dup = 1;
                cl->aureader_list = NULL;
                cs_strncpy(buf, cs_inet_ntoa(cl->ip), sizeof(buf));
                cs_log("client(%8lX) duplicate user '%s' from %s (prev %s) set to fake (uniq=%d)", (unsigned long)cl->thread, usr, cs_inet_ntoa(ip), buf, uniq);
                if (cl->failban & BAN_DUPLICATE)
                {
                    cs_add_violation(cl, usr);
                }
                if (cfg.dropdups)
                {
                    cs_writeunlock(&fakeuser_lock);  // unlock before killing: kill_thread must not run under fakeuser_lock
                    cs_sleepms(100);  // sleep a bit to prevent against saturation from fast reconnecting clients
                    kill_thread(cl);
                    cs_writelock(&fakeuser_lock);
                }
                // no break: keep scanning, there may be more old duplicates
            }
            else
            {
                // First login wins: fake the NEW (current) client.
                client->dup = 1;
                client->aureader_list = NULL;
                cs_strncpy(buf, cs_inet_ntoa(ip), sizeof(buf));
                cs_log("client(%8lX) duplicate user '%s' from %s (current %s) set to fake (uniq=%d)", (unsigned long)pthread_self(), usr, cs_inet_ntoa(cl->ip), buf, uniq);
                if (client->failban & BAN_DUPLICATE)
                {
                    cs_add_violation_by_ip(ip, get_module(client)->ptab->ports[client->port_idx].s_port, usr);
                }
                if (cfg.dropdups)
                {
                    cs_writeunlock(&fakeuser_lock);  // we need to unlock here as cs_disconnect_client kills the current thread!
                    cs_sleepms(100);  // sleep a bit to prevent against saturation from fast reconnecting clients
                    cs_disconnect_client(client);
                    cs_writelock(&fakeuser_lock);
                }
                break;  // the new client is already faked; nothing more to do
            }
        }
    }
    cs_writeunlock(&fakeuser_lock);
}
/* Validate a pushed cache-exchange CW and, if it passes all checks, insert
 * it into the cache and account the statistics.
 *
 * cl  - pushing client (reader in cacheex mode 2 or user in mode 3)
 * er  - the pushed ECM request carrying the CW
 * csp - nonzero when the push came via CSP (skips the per-client mode checks)
 *
 * Returns 1 when the entry was accepted and queued (caller must NOT free er:
 * it is parked on the ecm_pushed_deleted list), 0 when rejected. Every
 * rejection path bumps the client's (and account's) cwcacheexerr counter. */
static int32_t cacheex_add_to_cache_int(struct s_client *cl, ECM_REQUEST *er, int8_t csp)
{
    if(er->rc >= E_NOTFOUND) { return 0; }

    if(!cl) { return 0; }

    // Non-CSP pushes are only allowed from readers in cacheex mode 2 or
    // accounts in cacheex mode 3; anything else is rejected.
    if(!csp && cl->reader && cl->reader->cacheex.mode != 2) //from reader
    {
        cs_log_dbg(D_CACHEEX, "CACHEX received, but disabled for %s", username(cl));
        return 0;
    }

    if(!csp && !cl->reader && cl->account && cl->account->cacheex.mode != 3) //from user
    {
        cs_log_dbg(D_CACHEEX, "CACHEX received, but disabled for %s", username(cl));
        return 0;
    }

    if(!csp && !cl->reader && !cl->account) //not active!
    {
        cs_log_dbg(D_CACHEEX, "CACHEX received, but invalid client state %s", username(cl));
        return 0;
    }

    // CW checksum validation (every 4th byte is the sum of the preceding 3),
    // unless disabled globally or for this caid via the *_only_for filters.
    if(!cfg.disablecrccws && ((cl->typ == 'c' && !cl->account->disablecrccacheex) || ( cl->typ == 'p' && !cl->reader->disablecrccws)))
    {
        uint8_t selectedForIgnChecksum = chk_if_ignore_checksum(er, &cfg.disablecrccws_only_for);
        if(cl->typ == 'c')
        {
            selectedForIgnChecksum += chk_if_ignore_checksum(er, &cl->account->disablecrccacheex_only_for);
        }
        if(cl->typ == 'p')
        {
            selectedForIgnChecksum += chk_if_ignore_checksum(er, &cl->reader->disablecrccws_only_for);
        }
        if(!selectedForIgnChecksum)
        {
            uint8_t i, c;
            for(i = 0; i < 16; i += 4)
            {
                c = ((er->cw[i] + er->cw[i + 1] + er->cw[i + 2]) & 0xff);
                if(er->cw[i + 3] != c)
                {
                    cs_log_dump_dbg(D_CACHEEX, er->cw, 16, "push received cw with chksum error from %s", csp ? "csp" : username(cl));
                    cl->cwcacheexerr++;
                    if(cl->account) { cl->account->cwcacheexerr++; }
                    return 0;
                }
            }
        }
    }

    // Skip check for BISS1 - cw could be indeed zero
    // Skip check for BISS2 - we use the extended cw, so the "simple" cw is always zero
    if(chk_is_null_CW(er->cw) && !caid_is_biss(er->caid))
    {
        cs_log_dump_dbg(D_CACHEEX, er->cw, 16, "push received null cw from %s", csp ? "csp" : username(cl));
        cl->cwcacheexerr++;
        if(cl->account) { cl->account->cwcacheexerr++; }
        return 0;
    }

    // Don't check for BISS1 and BISS2 mode 1/E or fake caid (ECM is fake for them)
    // Don't check for BISS2 mode CA (ECM table is always 0x80)
    if(!caid_is_biss(er->caid) && !caid_is_fake(er->caid) && get_odd_even(er) == 0)
    {
        cs_log_dbg(D_CACHEEX, "push received ecm with null odd/even byte from %s", csp ? "csp" : username(cl));
        cl->cwcacheexerr++;
        if(cl->account) { cl->account->cwcacheexerr++; }
        return 0;
    }

    if(!chk_halfCW(er, er->cw))
    {
        log_cacheex_cw(er, "bad half cw");
        cl->cwcacheexerr++;
        if(cl->account) { cl->account->cwcacheexerr++; }
        return 0;
    }

    // Optional fake-CW blacklist, controlled per source (csp/reader/account).
    if((csp && cfg.csp.block_fakecws) || (cl->reader && cl->reader->cacheex.block_fakecws) || (!cl->reader && cl->account && cl->account->cacheex.block_fakecws))
    {
        if(chk_is_fakecw(er->cw))
        {
            cs_log_dbg(D_CACHEEX, "push received fake cw from %s", csp ? "csp" : username(cl));
            cl->cwcacheexerr++;
            if(cl->account) { cl->account->cwcacheexerr++; }
            return 0;
        }
    }

    // Accepted: tag the entry with the pusher's identity and group.
    er->grp |= cl->grp; // ok for mode2 reader too: cl->reader->grp
    er->rc = E_CACHEEX;
    er->cacheex_src = cl;
    er->selected_reader = cl->reader;
    er->client = NULL; // No Owner! So no fallback!

    if(check_client(cl))
    {
        cl->cwcacheexgot++;
        if(cl->account) { cl->account->cwcacheexgot++; }
        first_client->cwcacheexgot++;
    }

    cacheex_add_hitcache(cl, er); // we have to call it before add_cache, because in chk_process we could remove it!
    add_cache(er);
    cacheex_add_stats(cl, er->caid, er->srvid, er->prid, 1);

    // Park er on the pushed/deleted list; it is freed later by the push-out machinery.
    cs_writelock(__func__, &ecm_pushed_deleted_lock);
    er->next = ecm_pushed_deleted;
    ecm_pushed_deleted = er;
    cs_writeunlock(__func__, &ecm_pushed_deleted_lock);

    return 1; // NO free, we have to wait cache push out stuff ends.
}
/* Dispatch one formatted log line to all sinks: anticasc log, syslog, the
 * main log file, the in-memory webif/monitor history ring, and connected
 * monitor clients.
 *
 * txt is MUTATED in place: a '\n' is appended, and byte offsets into it are
 * assumed to follow the fixed header layout produced by the log formatter
 * (txt+8 skips the internal prefix, txt+24 skips the timestamp for syslog,
 * txt+4 is the 3-digit monitor counter slot, txt+39 is where the message
 * body starts). NOTE(review): offsets are presumed from the formatter —
 * confirm against the code that builds txt/log->header_len. */
static void write_to_log(char *txt, struct s_log *log, int8_t do_flush)
{
    char sbuf[16];

#ifdef CS_ANTICASC
    // Anticasc messages go to their own log file instead of syslog.
    if (!strncmp(txt + log->header_len, "acasc:", 6))
    {
        strcat(txt, "\n");
        switch_log(cfg.ac_logfile, &fpa, ac_init_log);
        if (fpa)
        {
            fputs(txt + 8, fpa);
            if (do_flush) fflush(fpa);
        }
    }
    else
#endif
    {
        if (cfg.logtosyslog)
            syslog(LOG_INFO, "%s", txt+24);  // skip internal prefix + timestamp for syslog
        strcat(txt, "\n");
    }
    cs_write_log(txt + 8, do_flush);

#if defined(WEBIF) || defined(MODULE_MONITOR)
    // Append "<user>\t<message>" to the in-memory history ring buffer.
    if (loghist && exit_oscam != 1)
    {
        char *usrtxt = log->cl_text;
        char *target_ptr = NULL;
        int32_t target_len = strlen(usrtxt) + (strlen(txt) - 8) + 1;

        cs_writelock(&loghistory_lock);
        char *lastpos = loghist + (cfg.loghistorysize) - 1;
        // Entry bigger than the whole buffer: truncate it to a stub message.
        if(loghist + target_len + 1 >= lastpos)
        {
            strncpy(txt + 39, "Log entry too long!", strlen(txt) - 39); // we can assume that the min loghistorysize is always 1024 so we don't need to check if this new string fits into it!
            target_len = strlen(usrtxt) + (strlen(txt) - 8) + 1;
        }
        if (!loghistptr)
            loghistptr = loghist;

        if (loghistptr + target_len + 1 > lastpos)
        {
            // Not enough room at the end: terminate and wrap to the start.
            *loghistptr='\0';
            loghistptr=loghist + target_len + 1;
            *loghistptr='\0';
            target_ptr=loghist;
        }
        else
        {
            // Reserve the slot, then write outside the lock (see below).
            target_ptr = loghistptr;
            loghistptr=loghistptr + target_len + 1;
            *loghistptr='\0';
        }
        cs_writeunlock(&loghistory_lock);

        // NOTE(review): the snprintf into the reserved slot happens after
        // unlock — concurrent readers may see a partially written entry.
        snprintf(target_ptr, target_len + 1, "%s\t%s", usrtxt, txt + 8);
    }
#endif

    // Forward the line to monitor clients, honouring their monitor level.
    struct s_client *cl;
    for (cl=first_client; cl ; cl=cl->next)
    {
        if ((cl->typ == 'm') && (cl->monlvl>0) && cl->log) //this variable is only initialized for cl->typ = 'm'
        {
            // Level 1 monitors only see client/monitor events for their own user.
            if (cl->monlvl<2)
            {
                if (log->cl_typ != 'c' && log->cl_typ != 'm')
                    continue;
                if (log->cl_usr && cl->account && strcmp(log->cl_usr, cl->account->usr))
                    continue;
            }
            // Stamp the per-client rolling 3-digit counter into the header.
            snprintf(sbuf, sizeof(sbuf), "%03d", cl->logcounter);
            cl->logcounter = (cl->logcounter+1) % 1000;
            memcpy(txt + 4, sbuf, 3);
#ifdef MODULE_MONITOR
            monitor_send_idx(cl, txt);
#endif
        }
    }
}
/* Open and initialise the physical device for `reader` according to its type.
 *
 * Returns OK on success, ERROR on failure. Card-reader drivers with their
 * own init hook (crdr.reader_init) short-circuit everything else.
 *
 * SC8in1 specifics: eight logical readers share one serial device named
 * "path:slot". The first slot to initialise opens the device and copies the
 * file handle to all sibling slots; the whole sequence runs under
 * sc8in1_lock. NOTE(review): the call() macro inside the SC8in1 path
 * (call(Sc8in1_Init(reader))) presumably returns on error — if so, the
 * function can exit with sc8in1_lock still held; confirm the macro. */
int32_t ICC_Async_Device_Init (struct s_reader *reader)
{
    reader->fdmc=-1;
    cs_debug_mask (D_IFD, "IFD: Opening device %s\n", reader->device);
    reader->written = 0;

    // Pluggable card-reader driver takes over completely if present.
    if (reader->crdr.active==1 && reader->crdr.reader_init) {
        return reader->crdr.reader_init(reader);
    }

    switch(reader->typ) {
        case R_SC8in1:
            cs_writelock(&sc8in1_lock);
            if (reader->handle != 0) {//this reader is already initialized
                cs_writeunlock(&sc8in1_lock);
                return OK;
            }
            //this reader is uninitialized, thus the first one, since the first one initializes all others
            //get physical device name
            int32_t pos = strlen(reader->device)-2; //this is where : should be located; is also valid length of physical device name
            if (reader->device[pos] != 0x3a) //0x3a = ":"
                cs_log("ERROR: '%c' detected instead of slot separator `:` at second to last position of device %s", reader->device[pos], reader->device);
            reader->slot=(int)reader->device[pos+1] - 0x30;  // slot digit after the ':'
            reader->device[pos]= 0; //slot 1 reader now gets correct physicalname
            //open physical device
            reader->handle = open (reader->device,  O_RDWR | O_NOCTTY| O_NONBLOCK);
            if (reader->handle < 0) {
                cs_log("ERROR opening device %s",reader->device);
                cs_writeunlock(&sc8in1_lock);
                return ERROR;
            }
            //copy physical device name and file handle to other slots
            struct s_reader *rdr;
            LL_ITER itr = ll_iter_create(configured_readers);
            while((rdr = ll_iter_next(&itr))) //copy handle to other slots
                if (rdr->typ == R_SC8in1 && rdr != reader) { //we have another sc8in1 reader
                    unsigned char save = rdr->device[pos];
                    rdr->device[pos]=0; //set to 0 so we can compare device names
                    if (!strcmp(reader->device, rdr->device)) {//we have a match to another slot with same device name
                        rdr->handle = reader->handle;
                        rdr->slot=(int)rdr->device[pos+1] - 0x30;
                    }
                    else
                        rdr->device[pos] = save; //restore character
                }
            break;
        case R_MP35:
        case R_MOUSE:
            reader->handle = open (reader->device, O_RDWR | O_NOCTTY| O_NONBLOCK);
            if (reader->handle < 0) {
                cs_log("ERROR opening device %s",reader->device);
                return ERROR;
            }
            break;
#if defined(TUXBOX) && defined(PPC)
        case R_DB2COM1:
        case R_DB2COM2:
            // DBox2: serial port plus the multicam control device.
            reader->handle = open (reader->device, O_RDWR | O_NOCTTY| O_SYNC);
            if (reader->handle < 0) {
                cs_log("ERROR opening device %s",reader->device);
                return ERROR;
            }
            if ((reader->fdmc = open(DEV_MULTICAM, O_RDWR)) < 0) {
                close(reader->handle);
                cs_log("ERROR opening device %s",DEV_MULTICAM);
                return ERROR;
            }
            break;
#endif
        case R_SMART:
#if defined(LIBUSB)
            call (SR_Init(reader));
            break;
#else
            cs_log("ERROR, you have specified 'protocol = smartreader' in oscam.server,");
            cs_log("recompile with SmartReader support.");
            return ERROR;
#endif
        case R_INTERNAL:
            // Internal reader backends are mutually exclusive build options.
#if defined(COOL)
            return Cool_Init(reader->device);
#elif defined(AZBOX)
            return Azbox_Init(reader);
#elif defined(SCI_DEV)
#if defined(SH4) || defined(STB04SCI)
            reader->handle = open (reader->device, O_RDWR|O_NONBLOCK|O_NOCTTY);
#else
            reader->handle = open (reader->device, O_RDWR);
#endif
            if (reader->handle < 0) {
                cs_log("ERROR opening device %s",reader->device);
                return ERROR;
            }
#elif defined(WITH_STAPI)
            return STReader_Open(reader->device, &reader->stsmart_handle);
#else//SCI_DEV
            cs_log("ERROR, you have specified 'protocol = internal' in oscam.server,");
            cs_log("recompile with internal reader support.");
            return ERROR;
#endif//SCI_DEV
            break;
#ifdef HAVE_PCSC
        case R_PCSC:
            return (pcsc_reader_init(reader, reader->device));
            break;
#endif
        default:
            cs_log("ERROR ICC_Device_Init: unknow reader type %i",reader->typ);
            return ERROR;
    }

    // Post-open protocol initialisation for serial-based reader types.
    if (reader->typ == R_MP35) {
        if (MP35_Init(reader)) {
            cs_log("ERROR: MP35_Init returns error");
            MP35_Close (reader);
            return ERROR;
        }
    }
    else if (reader->typ <= R_MOUSE)
        if (Phoenix_Init(reader)) {
            cs_log("ERROR: Phoenix_Init returns error");
            Phoenix_Close (reader);
            return ERROR;
        }

    if (reader->typ == R_SC8in1) {
        call(Sc8in1_Init(reader));  // NOTE(review): may return early while sc8in1_lock is held
        cs_writeunlock(&sc8in1_lock);
    }

    cs_debug_mask (D_IFD, "IFD: Device %s succesfully opened\n", reader->device);
    return OK;
}
/* Validate a pushed cache-exchange CW and, if it passes the checksum and
 * null-CW checks, insert it into the cache and account the statistics.
 *
 * cl  - pushing client (reader in cacheex mode 2 or user in mode 3)
 * er  - the pushed ECM request carrying the CW
 * csp - nonzero when the push came via CSP (skips the per-client mode checks)
 *
 * Returns 1 when the entry was accepted and queued (caller must NOT free er:
 * it is parked on the ecm_pushed_deleted list), 0 when rejected. */
static int32_t cacheex_add_to_cache_int(struct s_client *cl, ECM_REQUEST *er, int8_t csp)
{
    if(er->rc >= E_NOTFOUND) { return 0; }

    if(!cl) { return 0; }

    // Non-CSP pushes are only allowed from readers in cacheex mode 2 or
    // accounts in cacheex mode 3.
    if(!csp && cl->reader && cl->reader->cacheex.mode != 2) //from reader
    {
        cs_debug_mask(D_CACHEEX, "CACHEX received, but disabled for %s", username(cl));
        return 0;
    }

    if(!csp && !cl->reader && cl->account && cl->account->cacheex.mode != 3) //from user
    {
        cs_debug_mask(D_CACHEEX, "CACHEX received, but disabled for %s", username(cl));
        return 0;
    }

    if(!csp && !cl->reader && !cl->account) //not active!
    {
        cs_debug_mask(D_CACHEEX, "CACHEX received, but invalid client state %s", username(cl));
        return 0;
    }

    // DVB CSA checksum: in each 4-byte group, the 4th byte must equal the
    // low byte of the sum of the first 3. `null` accumulates the payload
    // bytes so an all-zero CW can be detected in the same pass.
    uint8_t i, c;
    uint8_t null = 0;
    for(i = 0; i < 16; i += 4)
    {
        c = ((er->cw[i] + er->cw[i + 1] + er->cw[i + 2]) & 0xff);
        null |= (er->cw[i] | er->cw[i + 1] | er->cw[i + 2]);
        if(er->cw[i + 3] != c)
        {
            cs_ddump_mask(D_CACHEEX, er->cw, 16, "push received cw with chksum error from %s", csp ? "csp" : username(cl));
            cl->cwcacheexerr++;
            if(cl->account) { cl->account->cwcacheexerr++; }
            return 0;
        }
    }

    // Reject an all-zero control word.
    if(null == 0 || chk_is_null_CW(er->cw))
    {
        cs_ddump_mask(D_CACHEEX, er->cw, 16, "push received null cw from %s", csp ? "csp" : username(cl));
        cl->cwcacheexerr++;
        if(cl->account) { cl->account->cwcacheexerr++; }
        return 0;
    }

    // Accepted: tag the entry with the pusher's identity and group.
    er->grp |= cl->grp; //ok for mode2 reader too: cl->reader->grp
    er->rc = E_CACHEEX;
    er->cacheex_src = cl;
    er->selected_reader = cl->reader;
    er->client = NULL; //No Owner! So no fallback!

    if(check_client(cl))
    {
        cl->cwcacheexgot++;
        if(cl->account) { cl->account->cwcacheexgot++; }
        first_client->cwcacheexgot++;
    }

    add_hitcache(cl, er); //we have to call it before add_cache, because in chk_process we could remove it!
    add_cache(er);
    cacheex_add_stats(cl, er->caid, er->srvid, er->prid, 1);

    // Park er on the pushed/deleted list; it is freed later by the push-out machinery.
    cs_writelock(&ecm_pushed_deleted_lock);
    er->next = ecm_pushed_deleted;
    ecm_pushed_deleted = er;
    cs_writeunlock(&ecm_pushed_deleted_lock);

    return 1; //NO free, we have to wait cache push out stuff ends.
}
/* Query whether a card is inserted in `reader`; *card is set to TRUE/FALSE.
 *
 * Returns OK on success, ERROR for unknown reader types (the call() macro in
 * the per-type branches may also return early on backend errors). */
int32_t ICC_Async_GetStatus (struct s_reader *reader, int32_t * card)
{
    int32_t in=0;

    // Pluggable card-reader driver takes over completely if present.
    if (reader->crdr.active==1 && reader->crdr.get_status) {
        call(reader->crdr.get_status(reader, &in));
        if (in)
            *card = TRUE;
        else
            *card = FALSE;
        return OK;
    }

    switch(reader->typ) {
        case R_DB2COM1:
        case R_DB2COM2:
            // NOTE(review): the DBox2 handling (and its break) is inside the
            // #if — on non-TUXBOX/PPC builds these case labels fall through
            // to the R_SC8in1 branch below; confirm this is intended.
#if defined(TUXBOX) && defined(PPC)
            {
            // Read the multicam status register; the card-present bit
            // differs between the two DBox2 ports.
            uint16_t msr=1;
            IO_Serial_Ioctl_Lock(reader, 1);
            ioctl(reader->fdmc, GET_PCDAT, &msr);
            if (reader->typ == R_DB2COM2)
                in=(!(msr & 1));
            else
                in=((msr & 0x0f00) == 0x0f00);
            IO_Serial_Ioctl_Lock(reader, 0);
            }
            break;
#endif
        case R_SC8in1:
            // Serialize with other slots sharing the same physical device.
            cs_writelock(&sc8in1_lock);
            call (Sc8in1_GetStatus(reader, &in));
            cs_writeunlock(&sc8in1_lock);
            break;
        case R_MP35:
        case R_MOUSE:
            call (Phoenix_GetStatus(reader, &in));
            break;
#if defined(LIBUSB)
        case R_SMART:
            call (SR_GetStatus(reader, &in));
            break;
#endif
        case R_INTERNAL:
            // Internal reader backends are mutually exclusive build options.
#if defined(SCI_DEV)
            call (Sci_GetStatus(reader, &in));
#elif defined(COOL)
            call (Cool_GetStatus(&in));
#elif defined(WITH_STAPI)
            call (STReader_GetStatus(reader->stsmart_handle, &in));
#elif defined(AZBOX)
            call(Azbox_GetStatus(reader, &in));
#endif
            break;
#ifdef HAVE_PCSC
        case R_PCSC:
            in = pcsc_check_card_inserted(reader);
            break;
#endif
        default:
            cs_log("ERROR ICC_Get_Status: unknow reader type %i",reader->typ);
            return ERROR;
    }

    if (in)
        *card = TRUE;
    else
        *card = FALSE;
    return OK;
}
/* Load the provider-id file (cs_provid) and swap the parsed list into
 * cfg.provid under the config lock.
 *
 * Each line has the form "caid:prov1@prov2@...|name|sat|lang". Lines that
 * are comments, too short, or lack the '|' payload separator are skipped.
 * Returns 0 on success (including "file missing"), 1 on allocation failure.
 * The old list is released via add_garbage() because lockless readers may
 * still hold pointers into it. */
int32_t init_provid(void)
{
    FILE *fp = open_config_file(cs_provid);
    if(!fp)
        { return 0; }

    int32_t nr;
    char *payload, *saveptr1 = NULL, *token;
    if(!cs_malloc(&token, MAXLINESIZE))
        { return 0; }

    struct s_provid *provid_ptr = NULL;                      // tail of the list being built
    struct s_provid *new_cfg_provid = NULL, *last_provid;    // new head / old head

    nr = 0;
    while(fgets(token, MAXLINESIZE, fp))
    {
        int32_t i, l;
        struct s_provid *new_provid = NULL;
        char *tmp, *ptr1;

        tmp = trim(token);
        if(tmp[0] == '#') { continue; }          // comment line
        if((l = strlen(tmp)) < 11) { continue; } // too short to be valid
        if(!(payload = strchr(token, '|'))) { continue; }
        *payload++ = '\0';                       // split "ids|payload"

        if(!cs_malloc(&new_provid, sizeof(struct s_provid)))
        {
            NULLFREE(token);
            fclose(fp);
            return (1);
        }

        // First pass: parse the caid (first token) and count the provids.
        new_provid->nprovid = 0;
        for(i = 0, ptr1 = strtok_r(token, ":@", &saveptr1); ptr1; ptr1 = strtok_r(NULL, ":@", &saveptr1), i++)
        {
            if(i==0)
            {
                new_provid->caid = a2i(ptr1, 3);
                continue;
            }
            new_provid->nprovid++;
        }

        if(!cs_malloc(&new_provid->provid, sizeof(uint32_t) * new_provid->nprovid))
        {
            NULLFREE(new_provid);
            NULLFREE(token);
            fclose(fp);
            return (1);
        }

        // Second pass: walk the NUL-separated tokens strtok_r left behind
        // (skip past the caid token) and convert each provid.
        ptr1 = token + strlen(token) + 1;
        for(i = 0; i < new_provid->nprovid ; i++)
        {
            new_provid->provid[i] = a2i(ptr1, 3);
            ptr1 = ptr1 + strlen(ptr1) + 1;
        }

        // Payload fields: provider name | satellite | language.
        for(i = 0, ptr1 = strtok_r(payload, "|", &saveptr1); ptr1; ptr1 = strtok_r(NULL, "|", &saveptr1), i++)
        {
            switch(i)
            {
                case 0:
                    cs_strncpy(new_provid->prov, trim(ptr1), sizeof(new_provid->prov));
                    break;
                case 1:
                    cs_strncpy(new_provid->sat, trim(ptr1), sizeof(new_provid->sat));
                    break;
                case 2:
                    cs_strncpy(new_provid->lang, trim(ptr1), sizeof(new_provid->lang));
                    break;
            }
        }

        // A provider name is mandatory; drop entries without one.
        if(strlen(new_provid->prov) == 0)
        {
            NULLFREE(new_provid->provid);
            NULLFREE(new_provid);
            continue;
        }

        // Append to the new list.
        nr++;
        if(provid_ptr)
        {
            provid_ptr->next = new_provid;
        }
        else
        {
            new_cfg_provid = new_provid;
        }
        provid_ptr = new_provid;
    }

    NULLFREE(token);
    fclose(fp);

    if(nr > 0)
        { cs_log("%d provid's loaded", nr); }

    // Always publish a non-NULL (possibly empty) list head.
    if(new_cfg_provid == NULL)
    {
        if(!cs_malloc(&new_cfg_provid, sizeof(struct s_provid)))
            { return (1); }
    }

    cs_writelock(__func__, &config_lock);
    //this allows reloading of provids, so cleanup of old data is needed:
    last_provid = cfg.provid; //old data
    cfg.provid = new_cfg_provid; //assign after loading, so everything is in memory
    cs_writeunlock(__func__, &config_lock);

    // Invalidate per-client cached pointers into the old list.
    struct s_client *cl;
    for(cl = first_client->next; cl ; cl = cl->next)
        { cl->last_providptr = NULL; }

    // Release the old list via the garbage collector (deferred free).
    struct s_provid *ptr, *nptr;
    if(last_provid)
    {
        ptr = last_provid;
        while(ptr) //cleanup old data:
        {
            add_garbage(ptr->provid);
            nptr = ptr->next;
            add_garbage(ptr);
            ptr = nptr;
        }
    }

    return (0);
}
/* Tear down and release a client: unlink it from the flat list and the hash
 * buckets, clean up the attached reader, flush statistics, close the socket
 * and hand remaining allocations to the garbage collector.
 *
 * Must only run once per client (kill_thread also removes clients; this path
 * is for a client exiting itself). */
void free_client(struct s_client *cl)
{
    if (!cl)
        return;
    struct s_reader *rdr = cl->reader;

    // Remove client from client list. kill_thread also removes this client, so here just if client exits itself...
    struct s_client *prev, *cl2;
    cs_writelock(&clientlist_lock);
    cl->kill = 1;
    // Find cl in the flat list (prev trails cl2 by one node).
    for (prev = first_client, cl2 = first_client->next; prev->next != NULL; prev = prev->next, cl2 = cl2->next)
    {
        if (cl == cl2)
            break;
    }
    if (cl == cl2)
        prev->next = cl2->next; // Remove client from list
    int32_t bucket = (uintptr_t)cl / 16 % CS_CLIENT_HASHBUCKETS;
    // Remove client from hashed list
    if (first_client_hashed[bucket] == cl)
    {
        first_client_hashed[bucket] = cl->nexthashed;
    }
    else
    {
        for (prev = first_client_hashed[bucket], cl2 = first_client_hashed[bucket]->nexthashed; prev->nexthashed != NULL; prev = prev->nexthashed, cl2 = cl2->nexthashed)
        {
            if (cl == cl2)
                break;
        }
        if (cl == cl2)
            prev->nexthashed = cl2->nexthashed;
    }
    cs_writeunlock(&clientlist_lock);

    // Clean reader. The cleaned structures should be only used by the reader thread, so we should be safe without waiting
    if (rdr)
    {
        remove_reader_from_ecm(rdr);
        remove_reader_from_active(rdr);
        if(rdr->ph.cleanup)
            rdr->ph.cleanup(cl);
        if (cl->typ == 'r')
            cardreader_close(rdr);
        if (cl->typ == 'p')
            network_tcp_connection_close(rdr, "cleanup");
        cl->reader = NULL;
    }

    // Clean client specific data
    if (cl->typ == 'c')
    {
        // Flush stats twice: once with the real last channel, once with the
        // sentinel values marking the client as gone.
        cs_statistics(cl);
        cl->last_caid = 0xFFFF;
        cl->last_srvid = 0xFFFF;
        cs_statistics(cl);
        cs_sleepms(500); //just wait a bit that really really nobody is accessing client data
        struct s_module *module = get_module(cl);
        if (module->cleanup)
            module->cleanup(cl);
    }

    // Close network socket if not already cleaned by previous cleanup functions
    if (cl->pfd)
        close(cl->pfd);

    // Clean all remaining structures (deferred frees via garbage collector,
    // since other threads may still hold stale pointers briefly).
    free_joblist(cl);
    cleanup_ecmtasks(cl);
    add_garbage(cl->emmcache);
#ifdef MODULE_CCCAM
    add_garbage(cl->cc);
#endif
#ifdef MODULE_SERIAL
    add_garbage(cl->serialdata);
#endif
    add_garbage(cl);
}
/* Tear down and release a client: unlink it from the flat list and the hash
 * buckets, clean up the attached reader, flush statistics, close the socket
 * and hand remaining allocations to the garbage collector.
 *
 * Re-entry safe: the kill_started flag (set under clientlist_lock) makes a
 * second concurrent call bail out with an error log instead of double-freeing. */
void free_client(struct s_client *cl)
{
    if(!cl)
        { return; }
    struct s_reader *rdr = cl->reader;

    // Remove client from client list. kill_thread also removes this client, so here just if client exits itself...
    struct s_client *prev, *cl2;
    cs_writelock(__func__, &clientlist_lock);
    // Guard against a second free of the same client.
    if(!cl->kill_started)
    {
        cl->kill_started = 1;
    }
    else
    {
        cs_writeunlock(__func__, &clientlist_lock);
        cs_log("[free_client] ERROR: free already started!");
        return;
    }
    cl->kill = 1;
    // Find cl in the flat list (prev trails cl2 by one node).
    for(prev = first_client, cl2 = first_client->next; prev->next != NULL; prev = prev->next, cl2 = cl2->next)
    {
        if(cl == cl2)
            { break; }
    }
    if(cl == cl2)
        { prev->next = cl2->next; } // Remove client from list
    int32_t bucket = (uintptr_t)cl / 16 % CS_CLIENT_HASHBUCKETS;
    // Remove client from hashed list
    if(first_client_hashed[bucket] == cl)
    {
        first_client_hashed[bucket] = cl->nexthashed;
    }
    else
    {
        for(prev = first_client_hashed[bucket], cl2 = first_client_hashed[bucket]->nexthashed; prev->nexthashed != NULL; prev = prev->nexthashed, cl2 = cl2->nexthashed)
        {
            if(cl == cl2)
                { break; }
        }
        if(cl == cl2)
            { prev->nexthashed = cl2->nexthashed; }
    }
    cs_writeunlock(__func__, &clientlist_lock);

    cleanup_ecmtasks(cl);

    // Clean reader. The cleaned structures should be only used by the reader thread, so we should be safe without waiting
    if(rdr)
    {
        ll_destroy_data(&rdr->emmstat);
        remove_reader_from_active(rdr);
        cs_sleepms(1000); //just wait a bit that really really nobody is accessing client data
        if(rdr->ph.cleanup)
            { rdr->ph.cleanup(cl); }
        if(cl->typ == 'r')
            { cardreader_close(rdr); }
        if(cl->typ == 'p')
            { network_tcp_connection_close(rdr, "cleanup"); }
        cl->reader = NULL;
    }

    // Clean client specific data
    if(cl->typ == 'c')
    {
        // Flush stats twice: once with the real last channel, once with the
        // sentinel values marking the client as gone.
        cs_statistics(cl);
        cl->last_caid = NO_CAID_VALUE;
        cl->last_provid = NO_PROVID_VALUE;
        cl->last_srvid = NO_SRVID_VALUE;
        cs_statistics(cl);
        cs_sleepms(1000); //just wait a bit that really really nobody is accessing client data
    }

    struct s_module *module = get_module(cl);
    if(module->cleanup)
        { module->cleanup(cl); }

    // Close network socket if not already cleaned by previous cleanup functions
    if(cl->pfd)
        { close(cl->pfd); }

    // Clean all remaining structures (deferred frees via garbage collector,
    // since other threads may still hold stale pointers briefly).
    free_joblist(cl);
    NULLFREE(cl->work_mbuf);

    if(cl->ecmtask)
    {
        add_garbage(cl->ecmtask);
        cl->ecmtask = NULL;
    }

    ll_destroy_data(&cl->cascadeusers);
    ftab_clear(&cl->ftab);
    ftab_clear(&cl->fchid);
    tuntab_clear(&cl->ttab);
    caidtab_clear(&cl->ctab);
    NULLFREE(cl->cltab.aclass);
    NULLFREE(cl->cltab.bclass);
    NULLFREE(cl->cw_rass);
    ll_destroy_data(&cl->ra_buf);
    NULLFREE(cl->aes_keys);

#ifdef MODULE_CCCAM
    add_garbage(cl->cc);
#endif
#ifdef MODULE_SERIAL
    add_garbage(cl->serialdata);
#endif
    add_garbage(cl);
}
/* Calculates the currently valid nonce value and copies it to result.
   Please note that nonce (may be NULL), opaque and result need to be at
   least (MD5_DIGEST_LENGTH * 2) + 1 large.

   Lookup order within the hash bucket chosen by opaque[0]:
   1. a matching nonce (first use stamps firstuse; a nonce older than
      AUTHNONCEVALIDSECS is unlinked and replaced),
   2. an unused entry with a matching opaque,
   3. otherwise a fresh nonce is generated from time+random+noncekey and
      prepended to the bucket.
   Expired entries are truncated off the chain under the lock and freed
   afterwards. NOTE(review): truncation assumes the chain is ordered so
   that everything after the first expired node is also expired — confirm
   (new nodes are prepended, which is consistent with that ordering). */
void calculate_nonce(char *nonce, char *result, char *opaque)
{
    struct s_nonce *noncelist, *prev, *foundnonce = NULL, *foundopaque = NULL, *foundexpired = NULL;
    int32_t bucket = opaque[0] % AUTHNONCEHASHBUCKETS;
    time_t now = time(NULL);
    cs_writelock(&nonce_lock[bucket]);
    for(noncelist = nonce_first[bucket], prev = NULL; noncelist; prev = noncelist, noncelist = noncelist->next)
    {
        if(now > noncelist->expirationdate)
        {
            // Cut the chain here; everything from this node on is freed below.
            if(prev) { prev->next = NULL; }
            else { nonce_first[bucket] = NULL; }
            foundexpired = noncelist;
            break;
        }
        if(nonce && !memcmp(noncelist->nonce, nonce, (MD5_DIGEST_LENGTH * 2) + 1))
        {
            memcpy(result, noncelist->nonce, (MD5_DIGEST_LENGTH * 2) + 1);
            foundnonce = noncelist;
            if(!noncelist->firstuse) { noncelist->firstuse = now; }
            else if(now - foundnonce->firstuse > AUTHNONCEVALIDSECS)
            {
                // In use too long: unlink so a fresh nonce gets generated below.
                if(prev) { prev->next = noncelist->next; }
                else { nonce_first[bucket] = noncelist->next; }
            }
            break;
        }
        else if(!noncelist->firstuse && !memcmp(noncelist->opaque, opaque, (MD5_DIGEST_LENGTH * 2) + 1))
        {
            foundopaque = noncelist;  // fallback: unused nonce issued for this opaque
        }
    }
    if(foundnonce && now - foundnonce->firstuse > AUTHNONCEVALIDSECS)
    {
        // The node was unlinked above, so freeing it here is safe.
        NULLFREE(foundnonce);
        foundnonce = NULL;
    }
    if(!foundnonce && foundopaque)
    {
        memcpy(result, foundopaque->nonce, (MD5_DIGEST_LENGTH * 2) + 1);
    }
    if(!foundnonce && !foundopaque)
    {
        // Generate a fresh nonce: MD5 over "time:random:noncekey", hex-encoded.
        char noncetmp[128], randstr[16];
        unsigned char md5tmp[MD5_DIGEST_LENGTH];
        get_random_bytes((uint8_t *)randstr, sizeof(randstr) - 1);
        randstr[sizeof(randstr) - 1] = '\0';
        snprintf(noncetmp, sizeof(noncetmp), "%d:%s:%s", (int32_t)now, randstr, noncekey);
        char_to_hex(MD5((unsigned char *)noncetmp, strlen(noncetmp), md5tmp), MD5_DIGEST_LENGTH, (unsigned char *)result);
        if(cs_malloc(&noncelist, sizeof(struct s_nonce)))
        {
            noncelist->expirationdate = now + AUTHNONCEEXPIRATION;
            memcpy(noncelist->nonce, result, (MD5_DIGEST_LENGTH * 2) + 1);
            memcpy(noncelist->opaque, opaque, (MD5_DIGEST_LENGTH * 2) + 1);
            // Prepend: newest (latest-expiring) entries sit at the head.
            noncelist->next = nonce_first[bucket];
            nonce_first[bucket] = noncelist;
        }
    }
    cs_writeunlock(&nonce_lock[bucket]);
    // Free the truncated expired tail outside the lock.
    while(foundexpired)
    {
        prev = foundexpired;
        foundexpired = foundexpired->next;
        NULLFREE(prev);
    }
}