int32_t coolapi_open_device(int32_t demux_index, int32_t demux_id)
{
	dmx_t *dmx;

	SAFE_MUTEX_LOCK(&demux_lock);

	dmx = find_demux(0, demux_index);
	if(!dmx)
	{
		SAFE_MUTEX_UNLOCK(&demux_lock);
		cs_log("no free demux found");
		return 0;
	}

	if(!ll_cool_filter)
	{
		ll_cool_filter = ll_create("ll_cool_filter");
	}

	if(!ll_cool_chanhandle)
	{
		ll_cool_chanhandle = ll_create("ll_cool_chanhandle");
	}

	dmx->demux_id = demux_id;
	dmx->pid = -1;
	//dmx->device = dmx_handles[demux_index].handle;
	dmx->opened = 1;

	pthread_mutexattr_t attr;
	SAFE_MUTEXATTR_INIT(&attr);
	SAFE_MUTEXATTR_SETTYPE(&attr, PTHREAD_MUTEX_ERRORCHECK_NP);
	SAFE_MUTEX_INIT(&dmx->mutex, &attr);

	SAFE_MUTEX_UNLOCK(&demux_lock);
	return dmx->fd;
}
void coolapi_close_all(void)
{
	SAFE_MUTEX_LOCK(&demux_lock);

	if(!dmx_opened)
	{
		SAFE_MUTEX_UNLOCK(&demux_lock);
		return;
	}

	int32_t i, j;
	for(i = 0; i < MAX_COOL_DMX; i++)
	{
		for(j = 0; j < MAX_FILTER; j++)
		{
			if(cdemuxes[i][j].fd > 0)
			{
				coolapi_remove_filter(cdemuxes[i][j].fd, cdemuxes[i][j].filter_num);
				coolapi_close_device(cdemuxes[i][j].fd);
			}
		}
	}

	coolapi_dmx_close();
	coolapi_stop_api();
	cool_kal_opened = 0;

	SAFE_MUTEX_UNLOCK(&demux_lock);
}
static void coolapi_read_data(dmx_t *dmx, dmx_callback_data_t *data)
{
	if(!dmx)
	{
		cs_log_dbg(D_DVBAPI, "handle is NULL!");
		return;
	}

	int32_t ret;
	uchar buffer[4096];

	SAFE_SETSPECIFIC(getclient, dvbapi_client);
	SAFE_MUTEX_LOCK(&dmx->mutex);
	memset(buffer, 0, sizeof(buffer));
	ret = coolapi_read(dmx, data, buffer);
	SAFE_MUTEX_UNLOCK(&dmx->mutex);

	if(ret > -1)
	{
		uint16_t filters = data->num;
		uint16_t flt;

		for(flt = 0; flt < filters; flt++)
		{
			uint32_t n = (uint32_t)data->tags[flt];
			S_COOL_FILTER *filter = find_filter_by_channel(data->channel, n);

			if(!filter || data->filters[flt] != filter->filter)
			{
				cs_log_dbg(D_DVBAPI, "filter not found in notification!!!!");
				continue;
			}

			dvbapi_process_input(dmx->demux_id, n, buffer, data->len);
		}
	}
}
/**
 * Called on signal SIGHUP.
 *
 * Reloads configs:
 *  - user accounts (oscam.user)
 *  - readers (oscam.server)
 *  - provider ids (oscam.provid)
 *  - service ids (oscam.srvid)
 *  - tier ids (oscam.tiers)
 *  - fake CWs (oscam.fakecws)
 * Also clears anticascading stats.
 **/
static void cs_reload_config(void)
{
	static pthread_mutex_t mutex;
	static int8_t mutex_init = 0;

	if(!mutex_init)
	{
		SAFE_MUTEX_INIT(&mutex, NULL);
		mutex_init = 1;
	}

	if(pthread_mutex_trylock(&mutex))
	{
		return; // a reload is already in progress
	}

	cs_accounts_chk();
	reload_readerdb();
	init_provid();
	init_srvid();
	init_tierid();
	init_fakecws();
	ac_init_stat();
	cs_reopen_log(); // FIXME: aclog.log, emm logs, cw logs (?)

	SAFE_MUTEX_UNLOCK(&mutex);
}
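For context, a minimal sketch of how a SIGHUP handler could be wired to trigger this reload. OSCAM's actual signal setup lives elsewhere; the handler, flag, and install helper below are illustrative assumptions. Since none of the reload functions are async-signal-safe, the handler only sets a flag for a normal thread context to act on.

#include <signal.h>
#include <string.h>

static volatile sig_atomic_t reload_pending = 0; // hypothetical flag, polled by a main loop

static void sighup_handler(int sig)
{
	(void)sig;
	reload_pending = 1; // defer the real work; cs_reload_config() is not async-signal-safe
}

static void install_sighup_handler(void) // hypothetical helper
{
	struct sigaction sa;
	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = sighup_handler;
	sigemptyset(&sa.sa_mask);
	sa.sa_flags = SA_RESTART; // restart interrupted syscalls instead of failing with EINTR
	sigaction(SIGHUP, &sa, NULL);
}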
static void *card_poll(void)
{
	struct s_client *cl;
	struct s_reader *rdr;
	pthread_mutex_t card_poll_sleep_cond_mutex;

	SAFE_MUTEX_INIT(&card_poll_sleep_cond_mutex, NULL);
	SAFE_COND_INIT(&card_poll_sleep_cond, NULL);
	set_thread_name(__func__);

	while(!exit_oscam)
	{
		cs_readlock(__func__, &readerlist_lock);
		for(rdr = first_active_reader; rdr; rdr = rdr->next)
		{
			if(rdr->enable && rdr->card_status == CARD_INSERTED)
			{
				cl = rdr->client;
				if(cl && !cl->kill)
				{
					add_job(cl, ACTION_READER_POLL_STATUS, 0, 0);
				}
			}
		}
		cs_readunlock(__func__, &readerlist_lock);

		// Build an absolute deadline one second from now and wait on the
		// condition variable, so the sleep can be cut short by a signal.
		struct timespec ts;
		struct timeval tv;
		gettimeofday(&tv, NULL);
		ts.tv_sec = tv.tv_sec;
		ts.tv_nsec = tv.tv_usec * 1000;
		ts.tv_sec += 1;

		SAFE_MUTEX_LOCK(&card_poll_sleep_cond_mutex);
		SAFE_COND_TIMEDWAIT(&card_poll_sleep_cond, &card_poll_sleep_cond_mutex, &ts); // sleep on card_poll_sleep_cond
		SAFE_MUTEX_UNLOCK(&card_poll_sleep_cond_mutex);
	}
	return NULL;
}
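Because the pause is a condition wait rather than a plain sleep(), another thread can end the one-second wait early. A minimal sketch, assuming card_poll_sleep_cond is the same global condition variable used above; the helper name is hypothetical.

#include <pthread.h>

void card_poll_wakeup(void) // hypothetical helper, not part of the source
{
	// Signalling without holding the paired mutex is legal POSIX; at worst
	// a signal sent while card_poll is not waiting is lost, and the thread
	// simply wakes on its next one-second timeout.
	pthread_cond_signal(&card_poll_sleep_cond);
}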
void coolapi_open_all(void)
{
	SAFE_MUTEX_LOCK(&demux_lock);
	coolapi_start_api();
	cool_kal_opened = 1;
	coolapi_dmx_open();
	SAFE_MUTEX_UNLOCK(&demux_lock);
}
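Taken together, the coolapi functions above imply the following bring-up and tear-down order. A hedged usage sketch; the wrapper function and the demux index/id values are illustrative only.

static void coolapi_lifecycle_example(void) // hypothetical, for illustration only
{
	coolapi_open_all(); // start the API and open the demux devices once at startup

	int32_t fd = coolapi_open_device(0, 0); // claim a free demux slot; returns its fd, or 0 if none is free
	if(fd > 0)
	{
		// ... coolapi_set_filter()/coolapi_read_data() activity happens here ...
		coolapi_close_device(fd);
	}

	coolapi_close_all(); // on shutdown: remove remaining filters, close devices, stop the API
}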
static void SSL_dyn_lock_function(int32_t mode, struct CRYPTO_dynlock_value *l, const char *file, int32_t line)
{
	if(mode & CRYPTO_LOCK)
	{
		SAFE_MUTEX_LOCK(&l->mutex);
	}
	else
	{
		SAFE_MUTEX_UNLOCK(&l->mutex);
	}

	// just to remove compiler warnings...
	if(file || line)
	{
		return;
	}
}
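This callback is one third of OpenSSL's pre-1.1.0 dynamic-lock interface. A hedged sketch of the matching create/destroy callbacks and their registration follows; the CRYPTO_set_dynlock_* setters are the real OpenSSL API, while the two callback bodies here are illustrative (the source defines its own versions elsewhere).

#include <openssl/crypto.h>
#include <stdlib.h>

static struct CRYPTO_dynlock_value *SSL_dyn_create_function(const char *file, int32_t line)
{
	struct CRYPTO_dynlock_value *l = malloc(sizeof(*l)); // illustrative body
	(void)file; (void)line;
	if(l)
	{
		SAFE_MUTEX_INIT(&l->mutex, NULL);
	}
	return l; // OpenSSL treats NULL as allocation failure
}

static void SSL_dyn_destroy_function(struct CRYPTO_dynlock_value *l, const char *file, int32_t line)
{
	(void)file; (void)line;
	pthread_mutex_destroy(&l->mutex);
	free(l);
}

static void ssl_locking_setup(void) // hypothetical wrapper
{
	CRYPTO_set_dynlock_create_callback(SSL_dyn_create_function);
	CRYPTO_set_dynlock_lock_callback(SSL_dyn_lock_function);
	CRYPTO_set_dynlock_destroy_callback(SSL_dyn_destroy_function);
}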
static void stapi_off(void)
{
	int32_t i;
	uint32_t ErrorCode;

	SAFE_MUTEX_LOCK(&filter_lock);

	cs_log("stapi shutdown");

	disable_pmt_files = 1;
	stapi_on = 0;

	for(i = 0; i < MAX_DEMUX; i++)
	{
		dvbapi_stop_descrambling(i);

		if(tkd_desc_info[i].path_hndl != 0)
		{
			ErrorCode = oscam_sttkd_Deallocate(tkd_desc_info[i].path_hndl, tkd_desc_info[i].key_hndl);
			if(ErrorCode != 0)
			{
				cs_log("oscam_sttkd_Deallocate failed! ErrorCode: %d", ErrorCode);
			}
		}
	}

	uint8_t TKD_InstanceID = 0;
	for(TKD_InstanceID = 0; TKD_InstanceID < TKD_MAX_NUMBER; TKD_InstanceID++)
	{
		ErrorCode = oscam_sttkd_Close(TKDHandle[TKD_InstanceID]);
		if(ErrorCode != 0)
		{
			cs_log("oscam_sttkd_Close: ErrorCode: %d TKDHandle: 0x%08X", ErrorCode, TKDHandle[TKD_InstanceID]);
		}
	}

	for(i = 0; i < PTINUM; i++)
	{
		if(dev_list[i].SessionHandle > 0)
		{
			if(dev_list[i].SignalHandle > 0)
			{
				oscam_stapi5_SignalAbort(dev_list[i].SignalHandle);
			}
			pthread_cancel(dev_list[i].thread);
		}
	}

	SAFE_MUTEX_UNLOCK(&filter_lock);
	sleep(2);
	return;
}
int32_t coolapi_close_device(int32_t fd)
{
	dmx_t *dmx = find_demux(fd, 0);
	if(!dmx)
	{
		cs_log_dbg(D_DVBAPI, "dmx is NULL!");
		return -1; // demux_lock is managed by callers such as coolapi_close_all(); don't unlock it here
	}

	cs_log_dbg(D_DVBAPI, "closing fd=%08x", fd);

	dmx->opened = 0;
	pthread_mutex_destroy(&dmx->mutex);
	memset(dmx, 0, sizeof(dmx_t));
	return 0;
}
void free_joblist(struct s_client *cl)
{
	int32_t lock_status = pthread_mutex_trylock(&cl->thread_lock);

	LL_ITER it = ll_iter_create(cl->joblist);
	struct job_data *data;
	while((data = ll_iter_next(&it)))
	{
		free_job_data(data);
	}
	ll_destroy(&cl->joblist);
	cl->account = NULL;

	if(cl->work_job_data) // Free job_data that was not freed by work_thread
	{
		free_job_data(cl->work_job_data);
	}
	cl->work_job_data = NULL;

	if(lock_status == 0) // only unlock if the trylock above actually acquired the lock
	{
		SAFE_MUTEX_UNLOCK(&cl->thread_lock);
	}

	pthread_mutex_destroy(&cl->thread_lock);
}
bool cacheex_check_queue_length(struct s_client *cl)
{
	// Avoid overfull job queues:
	if(ll_count(cl->joblist) <= 2000)
	{
		return 0;
	}

	cs_log_dbg(D_TRACE, "WARNING: job queue %s %s has more than 2000 jobs! count=%d, dropped!",
			   cl->typ == 'c' ? "client" : "reader", username(cl), ll_count(cl->joblist));

	// Thread down?
	SAFE_MUTEX_LOCK(&cl->thread_lock);
	if(cl && !cl->kill && cl->thread && cl->thread_active)
	{
		// Just test for an invalid thread id:
		if(pthread_detach(cl->thread) == ESRCH)
		{
			cl->thread_active = 0;
			cs_log_dbg(D_TRACE, "WARNING: %s %s thread died!",
					   cl->typ == 'c' ? "client" : "reader", username(cl));
		}
	}
	SAFE_MUTEX_UNLOCK(&cl->thread_lock);

	return 1;
}
static void *stapi_read_thread(void *sparam)
{
	int32_t dev_index, ErrorCode, i, j, CRCValid;
	uint32_t QueryBufferHandle = 0, DataSize = 0;
	uchar buf[BUFFLEN];

	struct read_thread_param *para = sparam;
	dev_index = para->id;

	SAFE_SETSPECIFIC(getclient, para->cli);
	pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
	pthread_cleanup_push(stapi_cleanup_thread, (void *)dev_index);

	int32_t error_count = 0;

	while(1)
	{
		QueryBufferHandle = 0;
		ErrorCode = oscam_stapi5_SignalWaitBuffer(dev_list[dev_index].SignalHandle, &QueryBufferHandle, 1000);

		switch(ErrorCode)
		{
			case 0: // NO_ERROR
				break;
			case 852042: // ERROR_SIGNAL_ABORTED
				cs_log("Caught abort signal");
				pthread_exit(NULL);
				break;
			case 11: // ERROR_TIMEOUT
				//cs_log("timeout %d", dev_index);
				//TODO: if pidindex == -1 try next
				continue;
			default:
				if(QueryBufferHandle != 0)
				{
					cs_log("SignalWaitBuffer error: %d", ErrorCode);
					oscam_stapi5_BufferFlush(QueryBufferHandle);
					continue;
				}
				cs_log("SignalWaitBuffer: index %d ErrorCode: %d - QueryBuffer: %x", dev_index, ErrorCode, QueryBufferHandle);
				error_count++;
				if(error_count > 10)
				{
					cs_log("Too many errors in reader thread %d, quitting.", dev_index);
					pthread_exit(NULL);
				}
				continue;
		}

		uint32_t NumFilterMatches = 0;
		int32_t demux_id = 0, filter_num = 0;
		DataSize = 0;
		uint32_t k;
		uint32_t MatchedFilterList[10];

		ErrorCode = oscam_stapi5_BufferReadSection(QueryBufferHandle, MatchedFilterList, 10, &NumFilterMatches,
												   &CRCValid, buf, BUFFLEN, &DataSize);
		if(ErrorCode != 0)
		{
			cs_log("BufferRead: index: %d ErrorCode: %d", dev_index, ErrorCode);
			cs_sleepms(1000);
			continue;
		}

		if(DataSize <= 0)
		{
			continue;
		}

		SAFE_MUTEX_LOCK(&filter_lock); // don't use cs_lock() here; multiple threads use the same s_client struct
		for(k = 0; k < NumFilterMatches; k++)
		{
			for(i = 0; i < MAX_DEMUX; i++)
			{
				for(j = 0; j < MAX_FILTER; j++)
				{
					if(dev_list[dev_index].demux_fd[i][j].fd == MatchedFilterList[k])
					{
						demux_id = i;
						filter_num = j;

						dvbapi_process_input(demux_id, filter_num, buf, DataSize);
					}
				}
			}
		}
		SAFE_MUTEX_UNLOCK(&filter_lock);
	}
	pthread_cleanup_pop(0);
}
void *work_thread(void *ptr)
{
	struct job_data *data = (struct job_data *)ptr;
	struct s_client *cl = data->cl;
	struct s_reader *reader = cl->reader;
	struct timeb start, end; // start time poll, end time poll
	struct job_data tmp_data;
	struct pollfd pfd[1];

	SAFE_SETSPECIFIC(getclient, cl);
	cl->thread = pthread_self();
	cl->thread_active = 1;

	set_work_thread_name(data);

	struct s_module *module = get_module(cl);
	uint16_t bufsize = module->bufsize; // CCcam needs more than 1024 bytes!
	if(!bufsize)
	{
		bufsize = DEFAULT_MODULE_BUFSIZE;
	}

	uint8_t *mbuf;
	if(!cs_malloc(&mbuf, bufsize))
	{
		return NULL;
	}
	cl->work_mbuf = mbuf; // Track locally allocated data, because some callback may call cs_exit/cs_disconnect_client/pthread_exit and then mbuf would be leaked

	int32_t n = 0, rc = 0, i, idx, s;
	uint8_t dcw[16];
	int8_t restart_reader = 0;

	while(cl->thread_active)
	{
		cs_ftime(&start); // register start time
		while(cl->thread_active)
		{
			if(!cl || cl->kill || !is_valid_client(cl))
			{
				SAFE_MUTEX_LOCK(&cl->thread_lock);
				cl->thread_active = 0;
				SAFE_MUTEX_UNLOCK(&cl->thread_lock);
				cs_log_dbg(D_TRACE, "ending thread (kill)");
				__free_job_data(cl, data);
				cl->work_mbuf = NULL; // Prevent free_client from freeing mbuf (->work_mbuf)
				free_client(cl);
				if(restart_reader)
				{
					restart_cardreader(reader, 0);
				}
				NULLFREE(mbuf);
				pthread_exit(NULL);
				return NULL;
			}

			if(data && data->action != ACTION_READER_CHECK_HEALTH)
			{
				cs_log_dbg(D_TRACE, "data from add_job action=%d client %c %s",
						   data->action, cl->typ, username(cl));
			}

			if(!data)
			{
				if(!cl->kill && cl->typ != 'r')
				{
					client_check_status(cl); // do not call for physical readers as this might cause an endless job loop
				}

				SAFE_MUTEX_LOCK(&cl->thread_lock);
				if(cl->joblist && ll_count(cl->joblist) > 0)
				{
					LL_ITER itr = ll_iter_create(cl->joblist);
					data = ll_iter_next_remove(&itr);
					if(data)
					{
						set_work_thread_name(data);
					}
					//cs_log_dbg(D_TRACE, "start next job from list action=%d", data->action);
				}
				SAFE_MUTEX_UNLOCK(&cl->thread_lock);
			}

			if(!data)
			{
				/* for a serial client, cl->pfd is the file descriptor for the
				   serial port, not a socket; for example: pfd = open("/dev/ttyUSB0"); */
				if(!cl->pfd || module->listenertype == LIS_SERIAL)
				{
					break;
				}

				pfd[0].fd = cl->pfd;
				pfd[0].events = POLLIN | POLLPRI;

				SAFE_MUTEX_LOCK(&cl->thread_lock);
				cl->thread_active = 2;
				SAFE_MUTEX_UNLOCK(&cl->thread_lock);

				rc = poll(pfd, 1, 3000);

				SAFE_MUTEX_LOCK(&cl->thread_lock);
				cl->thread_active = 1;
				SAFE_MUTEX_UNLOCK(&cl->thread_lock);

				if(rc > 0)
				{
					cs_ftime(&end); // register end time
					cs_log_dbg(D_TRACE, "[OSCAM-WORK] new event %d occurred on fd %d after %"PRId64" ms inactivity",
							   pfd[0].revents, pfd[0].fd, comp_timeb(&end, &start));
					data = &tmp_data;
					data->ptr = NULL;
					cs_ftime(&start); // register start time for new poll next run

					if(reader)
					{
						data->action = ACTION_READER_REMOTE;
					}
					else
					{
						if(cl->is_udp)
						{
							data->action = ACTION_CLIENT_UDP;
							data->ptr = mbuf;
							data->len = bufsize;
						}
						else
						{
							data->action = ACTION_CLIENT_TCP;
						}

						if(pfd[0].revents & (POLLHUP | POLLNVAL | POLLERR))
						{
							cl->kill = 1;
						}
					}
				}
			}

			if(!data)
			{
				continue;
			}

			if(!reader && data->action < ACTION_CLIENT_FIRST)
			{
				__free_job_data(cl, data);
				break;
			}

			if(!data->action)
			{
				break;
			}

			struct timeb actualtime;
			cs_ftime(&actualtime);
			int64_t gone = comp_timeb(&actualtime, &data->time);
			if(data != &tmp_data && gone > (int)cfg.ctimeout + 1000)
			{
				cs_log_dbg(D_TRACE, "dropping client data for %s time %"PRId64" ms", username(cl), gone);
				__free_job_data(cl, data);
				continue;
			}

			if(data != &tmp_data)
			{
				cl->work_job_data = data; // Track the current job_data
			}

			switch(data->action)
			{
				case ACTION_READER_IDLE:
					reader_do_idle(reader);
					break;

				case ACTION_READER_REMOTE:
					s = check_fd_for_data(cl->pfd);
					if(s == 0) // no data, another thread already read from fd?
					{
						break;
					}
					if(s < 0)
					{
						if(reader->ph.type == MOD_CONN_TCP)
						{
							network_tcp_connection_close(reader, "disconnect");
						}
						break;
					}

					rc = reader->ph.recv(cl, mbuf, bufsize);
					if(rc < 0)
					{
						if(reader->ph.type == MOD_CONN_TCP)
						{
							network_tcp_connection_close(reader, "disconnect on receive");
						}
						break;
					}

					cl->last = time(NULL); // *** TO BE REPLACED BY CS_FTIME() LATER ***

					idx = reader->ph.c_recv_chk(cl, dcw, &rc, mbuf, rc);
					if(idx < 0) // no dcw received
					{
						break;
					}
					if(!idx)
					{
						idx = cl->last_idx;
					}

					reader->last_g = time(NULL); // for reconnect timeout; *** TO BE REPLACED BY CS_FTIME() LATER ***

					for(i = 0, n = 0; i < cfg.max_pending && n == 0; i++)
					{
						if(cl->ecmtask[i].idx == idx)
						{
							cl->pending--;
							casc_check_dcw(reader, i, rc, dcw);
							n++;
						}
					}
					break;

				case ACTION_READER_RESET:
					cardreader_do_reset(reader);
					break;

				case ACTION_READER_ECM_REQUEST:
					reader_get_ecm(reader, data->ptr);
					break;

				case ACTION_READER_EMM:
					reader_do_emm(reader, data->ptr);
					break;

				case ACTION_READER_CARDINFO:
					reader_do_card_info(reader);
					break;

				case ACTION_READER_POLL_STATUS:
					cardreader_poll_status(reader);
					break;

				case ACTION_READER_INIT:
					if(!cl->init_done)
					{
						reader_init(reader);
					}
					break;

				case ACTION_READER_RESTART:
					cl->kill = 1;
					restart_reader = 1;
					break;

				case ACTION_READER_RESET_FAST:
					reader->card_status = CARD_NEED_INIT;
					cardreader_do_reset(reader);
					break;

				case ACTION_READER_CHECK_HEALTH:
					cardreader_do_checkhealth(reader);
					break;

				case ACTION_READER_CAPMT_NOTIFY:
					if(reader->ph.c_capmt)
					{
						reader->ph.c_capmt(cl, data->ptr);
					}
					break;

				case ACTION_CLIENT_UDP:
					n = module->recv(cl, data->ptr, data->len);
					if(n < 0)
					{
						break;
					}
					module->s_handler(cl, data->ptr, n);
					break;

				case ACTION_CLIENT_TCP:
					s = check_fd_for_data(cl->pfd);
					if(s == 0) // no data, another thread already read from fd?
					{
						break;
					}
					if(s < 0) // system error or fd wants to be closed
					{
						cl->kill = 1; // kill client on next run
						continue;
					}

					n = module->recv(cl, mbuf, bufsize);
					if(n < 0)
					{
						cl->kill = 1; // kill client on next run
						continue;
					}
					module->s_handler(cl, mbuf, n);
					break;

				case ACTION_CACHEEX1_DELAY:
					cacheex_mode1_delay(data->ptr);
					break;

				case ACTION_CACHEEX_TIMEOUT:
					cacheex_timeout(data->ptr);
					break;

				case ACTION_FALLBACK_TIMEOUT:
					fallback_timeout(data->ptr);
					break;

				case ACTION_CLIENT_TIMEOUT:
					ecm_timeout(data->ptr);
					break;

				case ACTION_ECM_ANSWER_READER:
					chk_dcw(data->ptr);
					break;

				case ACTION_ECM_ANSWER_CACHE:
					write_ecm_answer_fromcache(data->ptr);
					break;

				case ACTION_CLIENT_INIT:
					if(module->s_init)
					{
						module->s_init(cl);
					}
					cl->is_udp = module->type == MOD_CONN_UDP;
					cl->init_done = 1;
					break;

				case ACTION_CLIENT_IDLE:
					if(module->s_idle)
					{
						module->s_idle(cl);
					}
					else
					{
						cs_log("user %s reached %d sec idle limit.", username(cl), cfg.cmaxidle);
						cl->kill = 1;
					}
					break;

				case ACTION_CACHE_PUSH_OUT:
				{
					cacheex_push_out(cl, data->ptr);
					break;
				}

				case ACTION_CLIENT_KILL:
					cl->kill = 1;
					break;

				case ACTION_CLIENT_SEND_MSG:
				{
					if(config_enabled(MODULE_CCCAM))
					{
						struct s_clientmsg *clientmsg = (struct s_clientmsg *)data->ptr;
						cc_cmd_send(cl, clientmsg->msg, clientmsg->len, clientmsg->cmd);
					}
					break;
				}
			} // switch

			__free_job_data(cl, data);
		}

		if(thread_pipe[1] && (mbuf[0] != 0x00))
		{
			cs_log_dump_dbg(D_TRACE, mbuf, 1, "[OSCAM-WORK] Write to pipe:");
			if(write(thread_pipe[1], mbuf, 1) == -1) // wakeup client check
			{
				cs_log_dbg(D_TRACE, "[OSCAM-WORK] Writing to pipe failed (errno=%d %s)", errno, strerror(errno));
			}
		}

		// Check for a race condition where, while we ended, another thread added a job
		SAFE_MUTEX_LOCK(&cl->thread_lock);
		if(cl->joblist && ll_count(cl->joblist) > 0)
		{
			SAFE_MUTEX_UNLOCK(&cl->thread_lock);
			continue;
		}
		else
		{
			cl->thread_active = 0;
			SAFE_MUTEX_UNLOCK(&cl->thread_lock);
			break;
		}
	}

	cl->thread_active = 0;
	cl->work_mbuf = NULL; // Prevent free_client from freeing mbuf (->work_mbuf)
	NULLFREE(mbuf);
	pthread_exit(NULL);
	return NULL;
}
/**
 * Adds a job to the job queue.
 * If ptr should be free()d after use, set len to its size;
 * otherwise set len to 0.
 **/
int32_t add_job(struct s_client *cl, enum actions action, void *ptr, int32_t len)
{
	if(!cl || cl->kill) // Ignore jobs for killed clients
	{
		if(!cl)
		{
			cs_log("WARNING: add_job failed. Client killed!");
		}
		if(len && ptr)
		{
			NULLFREE(ptr);
		}
		return 0;
	}

	if(action == ACTION_CACHE_PUSH_OUT && cacheex_check_queue_length(cl))
	{
		if(len && ptr)
		{
			NULLFREE(ptr);
		}
		return 0;
	}

	struct job_data *data;
	if(!cs_malloc(&data, sizeof(struct job_data)))
	{
		if(len && ptr)
		{
			NULLFREE(ptr);
		}
		return 0;
	}

	data->action = action;
	data->ptr = ptr;
	data->cl = cl;
	data->len = len;
	cs_ftime(&data->time);

	SAFE_MUTEX_LOCK(&cl->thread_lock);
	if(cl && !cl->kill && cl->thread_active)
	{
		if(!cl->joblist)
		{
			cl->joblist = ll_create("joblist");
		}
		ll_append(cl->joblist, data);
		if(cl->thread_active == 2)
		{
			pthread_kill(cl->thread, OSCAM_SIGNAL_WAKEUP);
		}
		SAFE_MUTEX_UNLOCK(&cl->thread_lock);
		cs_log_dbg(D_TRACE, "add %s job action %d queue length %d %s",
				   action > ACTION_CLIENT_FIRST ? "client" : "reader",
				   action, ll_count(cl->joblist), username(cl));
		return 1;
	}

	/* pcsc doesn't like this; segfaults on x86, x86_64 */
	int8_t modify_stacksize = 0;
	struct s_reader *rdr = cl->reader;
	if(cl->typ != 'r' || !rdr || rdr->typ != R_PCSC)
	{
		modify_stacksize = 1;
	}

	if(action != ACTION_READER_CHECK_HEALTH)
	{
		cs_log_dbg(D_TRACE, "start %s thread action %d",
				   action > ACTION_CLIENT_FIRST ? "client" : "reader", action);
	}

	int32_t ret = start_thread("client work", work_thread, (void *)data, &cl->thread, 1, modify_stacksize);
	if(ret)
	{
		cs_log("ERROR: can't create thread for %s (errno=%d %s)",
			   action > ACTION_CLIENT_FIRST ? "client" : "reader", ret, strerror(ret));
		free_job_data(data);
	}

	cl->thread_active = 1;
	SAFE_MUTEX_UNLOCK(&cl->thread_lock);
	return 1;
}
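A hedged usage sketch of the len convention described in the doc comment above. The wrapper function is hypothetical; er and emm_pack stand in for an ECM_REQUEST owned by the ECM pipeline and a heap-allocated EMM packet handed over to the job, respectively.

static void add_job_usage_example(struct s_client *cl, struct s_reader *rdr, ECM_REQUEST *er) // hypothetical
{
	(void)cl;

	// Pointer owned elsewhere: pass len = 0 so add_job never frees it
	add_job(rdr->client, ACTION_READER_ECM_REQUEST, (void *)er, 0);

	// Heap copy handed over to the job: pass its size so add_job can
	// free it when the client is dead or the job cannot be queued
	EMM_PACKET *emm_pack; // hypothetical payload, filled in by the caller
	if(cs_malloc(&emm_pack, sizeof(EMM_PACKET)))
	{
		// ... fill emm_pack ...
		add_job(rdr->client, ACTION_READER_EMM, emm_pack, sizeof(EMM_PACKET));
	}
}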
int32_t coolapi_remove_filter(int32_t fd, int32_t num)
{
	void *channel = NULL;
	void *filter = NULL;

	dmx_t *dmx = find_demux(fd, 0);
	if(!dmx)
	{
		cs_log_dbg(D_DVBAPI, "dmx is NULL!");
		return -1;
	}

	if(dmx->pid <= 0)
	{
		return -1;
	}

	int32_t result;

	SAFE_MUTEX_LOCK(&dmx->mutex);

	// Find the matching channel, if it exists.
	S_COOL_CHANHANDLE *handle_item = find_chanhandle(COOLDEMUX_DMX_DEV(fd), dmx->pid);
	if(!handle_item)
	{
		SAFE_MUTEX_UNLOCK(&dmx->mutex);
		cs_log_dbg(D_DVBAPI, "removing filter fd=%08x num=%d pid=%04x failed, channel does not exist.", fd, num, dmx->pid);
		return -1;
	}

	channel = handle_item->channel;

	cs_log_dbg(D_DVBAPI, "removing filter fd=%08x num=%d pid=%04x on channel=%p", fd, num, dmx->pid, channel);

	S_COOL_FILTER *filter_item = find_filter_by_chanhandle(handle_item, num);
	if(filter_item)
	{
		result = cnxt_dmx_channel_suspend(channel, 1);
		coolapi_check_error("cnxt_dmx_channel_suspend", result);
		result = cnxt_dmx_channel_detach_filter(channel, filter_item->filter);
		coolapi_check_error("cnxt_dmx_channel_detach_filter", result);
#if 0
		result = cnxt_dmx_close_filter(filter_item->filter);
		coolapi_check_error("cnxt_dmx_close_filter", result);
#endif
		filter = filter_item->filter;
		remove_filter(filter_item);
		handle_item->allocated_filters--;
	}
	else
	{
		SAFE_MUTEX_UNLOCK(&dmx->mutex);
		cs_log_dbg(D_DVBAPI, "removing filter fd=%08x num=%d pid=%04x on channel=%x failed, filter does not exist.", fd, num, dmx->pid, (int32_t)handle_item->channel);
		return -1;
	}

	if(!handle_item->allocated_filters)
	{
		// Stop the channel before tearing it down
		result = cnxt_dmx_channel_ctrl(channel, 0, 0);
		coolapi_check_error("cnxt_dmx_channel_ctrl", result);

		cs_log_dbg(D_DVBAPI, "closing channel %x", (int32_t)channel);

		result = cnxt_dmx_set_channel_pid(channel, 0x1FFF);
		coolapi_check_error("cnxt_dmx_set_channel_pid", result);

		result = cnxt_cbuf_flush(handle_item->buffer1, 0);
		coolapi_check_error("cnxt_cbuf_flush", result);

		result = cnxt_cbuf_flush(handle_item->buffer2, 0);
		coolapi_check_error("cnxt_cbuf_flush", result);

		result = cnxt_cbuf_detach(handle_item->buffer2, 2, channel);
		coolapi_check_error("cnxt_cbuf_detach", result);

		result = cnxt_dmx_channel_detach(channel, 0xB, 0, handle_item->buffer1);
		coolapi_check_error("cnxt_dmx_channel_detach", result);
#if 0
		result = cnxt_dmx_channel_close(channel);
		coolapi_check_error("cnxt_dmx_channel_close", result);
#endif
		result = cnxt_cbuf_close(handle_item->buffer2);
		coolapi_check_error("cnxt_cbuf_close", result);

		result = cnxt_cbuf_close(handle_item->buffer1);
		coolapi_check_error("cnxt_cbuf_close", result);

		handle_item->channel = NULL;
		handle_item->buffer1 = NULL;
		handle_item->buffer2 = NULL;

		remove_chanhandle(handle_item);
		dmx_handles[COOLDEMUX_DMX_DEV(fd)].allocated_channels--;
		dmx->pid = -1;
	}
	else
	{
		result = cnxt_dmx_channel_suspend(channel, 0);
		coolapi_check_error("cnxt_dmx_channel_suspend", result);
		channel = NULL;
	}

	SAFE_MUTEX_UNLOCK(&dmx->mutex);

	// Close the filter (and, if torn down, the channel) outside the demux mutex
	if(filter)
	{
		result = cnxt_dmx_close_filter(filter);
		coolapi_check_error("cnxt_dmx_close_filter", result);
	}

	if(channel)
	{
		result = cnxt_dmx_channel_close(channel);
		coolapi_check_error("cnxt_dmx_channel_close", result);
	}

	return 0;
}
int32_t coolapi_set_filter(int32_t fd, int32_t num, int32_t pid, uchar *flt, uchar *mask, int32_t type)
{
	dmx_t *dmx = find_demux(fd, 0);
	if(!dmx)
	{
		cs_log_dbg(D_DVBAPI, "dmx is NULL!");
		return -1;
	}

	int32_t result, channel_found;

	SAFE_MUTEX_LOCK(&dmx->mutex);

	// Find the matching channel, if it exists.
	S_COOL_CHANHANDLE *handle_item = find_chanhandle(COOLDEMUX_DMX_DEV(fd), pid);
	if(!handle_item)
	{
		// No channel was found, allocate one
		buffer_open_arg_t bufarg;
		int32_t uBufferSize = 8192 + 64;

		/* Mark that we did not find any open channel on this PID */
		channel_found = 0;

		if(!cs_malloc(&handle_item, sizeof(S_COOL_CHANHANDLE)))
		{
			SAFE_MUTEX_UNLOCK(&dmx->mutex);
			return -1;
		}

		memset(&bufarg, 0, sizeof(bufarg));
#ifdef HAVE_COOLAPI2
		bufarg.poolid = 5;
#endif
		bufarg.type = 3;
		bufarg.size = uBufferSize;
		bufarg.hwm = (uBufferSize * 7) / 8;

		result = cnxt_cbuf_open(&handle_item->buffer1, &bufarg, NULL, NULL);
		coolapi_check_error("cnxt_cbuf_open", result);

		bufarg.type = 0;
#ifdef HAVE_COOLAPI2
		bufarg.poolid = 0;
#endif

		result = cnxt_cbuf_open(&handle_item->buffer2, &bufarg, NULL, NULL);
		coolapi_check_error("cnxt_cbuf_open", result);

		channel_open_arg_t chanarg;
		memset(&chanarg, 0, sizeof(channel_open_arg_t));
		chanarg.type = 4;

		result = cnxt_dmx_channel_open(dmx_handles[COOLDEMUX_DMX_DEV(fd)].handle, &handle_item->channel, &chanarg, dmx_callback, dmx);
		coolapi_check_error("cnxt_dmx_channel_open", result);

		result = cnxt_dmx_set_channel_buffer(handle_item->channel, 0, handle_item->buffer1);
		coolapi_check_error("cnxt_dmx_set_channel_buffer", result);

		result = cnxt_dmx_channel_attach(handle_item->channel, 0xB, 0, handle_item->buffer2);
		coolapi_check_error("cnxt_dmx_channel_attach", result);

		result = cnxt_cbuf_attach(handle_item->buffer2, 2, handle_item->channel);
		coolapi_check_error("cnxt_cbuf_attach", result);

		result = cnxt_dmx_set_channel_pid(handle_item->channel, pid);
		coolapi_check_error("cnxt_dmx_set_channel_pid", result);

		result = cnxt_cbuf_flush(handle_item->buffer1, 0);
		coolapi_check_error("cnxt_cbuf_flush", result);

		result = cnxt_cbuf_flush(handle_item->buffer2, 0);
		coolapi_check_error("cnxt_cbuf_flush", result);

		handle_item->pid = pid;
		handle_item->dmx_handle = &dmx_handles[COOLDEMUX_DMX_DEV(fd)];
		dmx_handles[COOLDEMUX_DMX_DEV(fd)].allocated_channels++;
		ll_append(ll_cool_chanhandle, handle_item);

		cs_log_dbg(D_DVBAPI, "opened new channel %x", (int32_t)handle_item->channel);
	}
	else
	{
		channel_found = 1;
	}

	cs_log_dbg(D_DVBAPI, "setting new filter fd=%08x demux=%d channel=%x num=%d pid=%04x flt=%x mask=%x",
			   fd, COOLDEMUX_DMX_DEV(fd), (int32_t)handle_item->channel, num, pid, flt[0], mask[0]);

	void *filter_handle = NULL;
	filter_set_t filterset;
	int32_t has_filter = 0;

	S_COOL_FILTER *filter_item = find_filter_by_chanhandle(handle_item, num);
	if(filter_item && type == dmx->type && pid == dmx->pid &&
			(memcmp(flt, filter_item->filter16, 16) || memcmp(mask, filter_item->mask16, 16)))
	{
		cs_log_dbg(D_DVBAPI, "setting new filter fd=%08x demux=%d channel=%x num=%d pid=%04x flt=%x mask=%x, filter exists.. modifying",
				   fd, COOLDEMUX_DMX_DEV(fd), (int32_t)handle_item->channel, num, pid, flt[0], mask[0]);

		filter_handle = filter_item->filter;
		has_filter = 1;

		memcpy(filter_item->filter16, flt, 16);
		memcpy(filter_item->mask16, mask, 16);
	}
	else
	{
		dmx->pid = pid;
		dmx->type = type;
		dmx->filter_num = num;

		result = cnxt_dmx_open_filter(dmx_handles[COOLDEMUX_DMX_DEV(fd)].handle, &filter_handle);
		coolapi_check_error("cnxt_dmx_open_filter", result);

		if(!cs_malloc(&filter_item, sizeof(S_COOL_FILTER)))
		{
			SAFE_MUTEX_UNLOCK(&dmx->mutex);
			return -1;
		}

		// fill the filter item
		filter_item->fd = fd;
		filter_item->filter = filter_handle;
		filter_item->filter_num = num;
		filter_item->chanhandle = handle_item;
		memcpy(filter_item->filter16, flt, 16);
		memcpy(filter_item->mask16, mask, 16);

		// add the filter item
		ll_append(ll_cool_filter, filter_item);

		// increase allocated filters
		handle_item->allocated_filters++;
	}

	if(has_filter)
	{
		result = cnxt_dmx_channel_suspend(handle_item->channel, 1);
		coolapi_check_error("cnxt_dmx_channel_suspend", result);
		result = cnxt_dmx_channel_detach_filter(handle_item->channel, filter_handle);
		coolapi_check_error("cnxt_dmx_channel_detach_filter", result);
	}

	memset(&filterset, 0, sizeof(filterset));
	filterset.length = 12;
	memcpy(filterset.filter, flt, 16);
	memcpy(filterset.mask, mask, 16);

	result = cnxt_dmx_set_filter(filter_handle, &filterset, (void *)num);
	coolapi_check_error("cnxt_dmx_set_filter", result);

	result = cnxt_dmx_channel_attach_filter(handle_item->channel, filter_handle);
	coolapi_check_error("cnxt_dmx_channel_attach_filter", result);

	if(has_filter)
	{
		result = cnxt_dmx_channel_suspend(handle_item->channel, 0);
		coolapi_check_error("cnxt_dmx_channel_suspend", result);
	}

	if(!channel_found)
	{
		// Start the channel
		result = cnxt_dmx_channel_ctrl(handle_item->channel, 2, 0);
		coolapi_check_error("cnxt_dmx_channel_ctrl", result);
	}

	SAFE_MUTEX_UNLOCK(&dmx->mutex);
	return 0;
}