Example No. 1
static void oscam_ser_server(void)
{
  int32_t n;
  uchar mbuf[1024];

  int32_t * pserial_errors = &cur_client()->serialdata->serial_errors;

  cur_client()->serialdata->connected=0;
  oscam_ser_init_client();

  while ((n=process_input(mbuf, sizeof(mbuf), INT_MAX))>0)
  {
    if ((*pserial_errors) > 3)
    {
      cs_log("too many errors, reiniting...");
      break;
    }
    oscam_ser_auth_client(mbuf[0] & 0xF);
    switch (mbuf[0]>>4)
    {
      case IS_ECM:
        oscam_ser_process_ecm(mbuf+1, n-1);
        break;
      case IS_PMT:
        oscam_ser_process_pmt(mbuf+1, n-1);
        break;
      case IS_LGO:
        oscam_ser_client_logon(mbuf+1, n-1);
        break;
    }
  }
  if (cur_client()->serialdata->oscam_ser_port > 0)
    network_tcp_connection_close(cur_client()->reader, "error reading from socket");
  oscam_ser_disconnect();
}
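
A minimal sketch of the framing Example 1 assumes: the serial protocol packs two fields into mbuf[0], a 4-bit connection/auth id in the low nibble (passed to oscam_ser_auth_client()) and a 4-bit message type in the high nibble (matched against IS_ECM, IS_PMT and IS_LGO). The helper names below are illustrative, not OSCam API:

#include <stdint.h>

static inline uint8_t ser_frame_type(uint8_t b0) { return b0 >> 4; }   /* IS_ECM, IS_PMT or IS_LGO */
static inline uint8_t ser_frame_conn(uint8_t b0) { return b0 & 0x0F; } /* id for oscam_ser_auth_client() */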
Example No. 2
static int32_t __camd35_send(struct s_client *cl, uchar *buf, int32_t buflen, int answer_awaited)
{
	int32_t l;
	unsigned char rbuf[REQ_SIZE + 15 + 4], *sbuf = rbuf + 4;

	if(!cl->udp_fd || !cl->crypted) { return (-1); }  // exit if there is no fd or the AES key is not set

	//Fix ECM len > 255
	if(buflen <= 0)
		{ buflen = ((buf[0] == 0) ? (((buf[21] & 0x0f) << 8) | buf[22]) + 3 : buf[1]); }
	l = 20 + (((buf[0] == 3) || (buf[0] == 4)) ? 0x34 : 0) + buflen;
	memcpy(rbuf, cl->ucrc, 4);
	memcpy(sbuf, buf, l);
	memset(sbuf + l, 0xff, 15); // set unused space to 0xff for newer camd3's
	i2b_buf(4, crc32(0L, sbuf + 20, buflen), sbuf + 4);
	l = boundary(4, l);
	cs_log_dump_dbg(cl->typ == 'c' ? D_CLIENT : D_READER, sbuf, l, "send %d bytes to %s", l, username(cl));
	aes_encrypt_idx(cl->aes_keys, sbuf, l);

	int32_t status;
	if(cl->is_udp)
	{
		status = sendto(cl->udp_fd, rbuf, l + 4, 0, (struct sockaddr *)&cl->udp_sa, cl->udp_sa_len);
		if(status == -1) { set_null_ip(&SIN_GET_ADDR(cl->udp_sa)); }
	}
	else
	{
		status = send(cl->udp_fd, rbuf, l + 4, 0);

		if(cl->typ == 'p' && cl->reader)
		{
			if(status == -1) { network_tcp_connection_close(cl->reader, "can't send"); }
		}
		else if(cl->typ == 'c')
		{
			if(status == -1) { cs_disconnect_client(cl); }
		}
	}
	if(status != -1)
	{
		if(cl->reader && answer_awaited)
		{
			cl->reader->last_s = time(NULL);
		}
		if(cl->reader && !answer_awaited)
		{
			cl->reader->last_s = cl->reader->last_g = time(NULL);
		}
		cl->last = time(NULL);

	}
	return status;
}
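
Two derived lengths drive __camd35_send(). When buflen <= 0, the payload length is recovered from the message itself: for an ECM (buf[0] == 0), the 12-bit value at bytes 21/22 (apparently the DVB section length) plus its 3-byte header; otherwise buf[1]. Before encryption, boundary(4, l) pads the total to the 16-byte AES block size. A sketch of that rounding, assuming the macro has OSCam's usual shape:

/* Assumed definition: round n up to the next multiple of 2^exp, so
   boundary(4, l) pads l to a multiple of 16 bytes for aes_encrypt_idx(). */
#define boundary(exp, n) (((((n) - 1) >> (exp)) + 1) << (exp))
/* e.g. boundary(4, 33) == 48 and boundary(4, 48) == 48 */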
Example No. 3
void radegast_idle(void) {
	struct s_client *client = cur_client();
	struct s_reader *rdr = client->reader;

	if (!rdr) return;

	if (rdr->tcp_ito > 0) {
		// inactivitytimeout > 0 enables protocol
		time_t now;
		int32_t time_diff;
		time(&now);
		time_diff = abs(now - rdr->last_s);
		if (time_diff > rdr->tcp_ito) {
			network_tcp_connection_close(client->reader, "inactivity");
		}
	}
	else if (rdr->tcp_ito == -1) {
		// idle reconnect
		radegast_connect();
	}
}
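
Example 4 below is a later revision of this same function: it hoists the time() call to the top, replaces the int-returning abs() with llabs() for the time_t difference, passes rdr to network_tcp_connection_close() directly instead of re-reading client->reader, and returns early from both branches.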
Example No. 4
void radegast_idle(void)
{
	struct s_client *client = cur_client();
	struct s_reader *rdr = client->reader;
	time_t now = time(NULL);
	if(!rdr) { return; }

	if(rdr->tcp_ito > 0)
	{
		int32_t time_diff;
		time_diff = llabs(now - rdr->last_s);
		if(time_diff > (rdr->tcp_ito))
		{
			network_tcp_connection_close(rdr, "inactivity");
			return;
		}
	}
	else if(rdr->tcp_ito == -1)
	{
		radegast_connect();
		return;
	}
}
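
The switch from abs() to llabs() matters on platforms with a 64-bit time_t: abs() takes and returns int, so a wide difference can be truncated before the comparison. A standalone sketch of the guard, assuming last_s records the time of the last send:

#include <stdint.h>
#include <stdlib.h>
#include <time.h>

/* Returns nonzero once more than ito_secs have passed since last_s. */
static int idle_expired(time_t last_s, int32_t ito_secs)
{
    return llabs((long long)(time(NULL) - last_s)) > (long long)ito_secs;
}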
Example No. 5
static void process_clients(void) {
	int32_t i, k, j, rc, pfdcount = 0;
	struct s_client *cl;
	struct s_reader *rdr;
	struct pollfd *pfd;
	struct s_client **cl_list;
	uint32_t cl_size = 0;

	char buf[10];

	if (pipe(thread_pipe) == -1) {
		printf("cannot create pipe, errno=%d\n", errno);
		exit(1);
	}

	cl_size = chk_resize_cllist(&pfd, &cl_list, 0, 100);

	pfd[pfdcount].fd = thread_pipe[0];
	pfd[pfdcount].events = POLLIN | POLLPRI | POLLHUP;
	cl_list[pfdcount] = NULL;

	while (!exit_oscam) {
		pfdcount = 1;

		//connected tcp clients
		for (cl=first_client->next; cl; cl=cl->next) {
			if (cl->init_done && !cl->kill && cl->pfd && cl->typ=='c' && !cl->is_udp) {
				if (cl->pfd && !cl->thread_active) {
					cl_size = chk_resize_cllist(&pfd, &cl_list, cl_size, pfdcount);
					cl_list[pfdcount] = cl;
					pfd[pfdcount].fd = cl->pfd;
					pfd[pfdcount++].events = POLLIN | POLLPRI | POLLHUP;
				}
			}
			//reader:
			//TCP:
			//	- TCP socket must be connected
			//	- no active init thread
			//UDP:
			//	- connection status ignored
			//	- no active init thread
			rdr = cl->reader;
			if (rdr && cl->typ=='p' && cl->init_done) {
				if (cl->pfd && !cl->thread_active && ((rdr->tcp_connected && rdr->ph.type==MOD_CONN_TCP)||(rdr->ph.type==MOD_CONN_UDP))) {
					cl_size = chk_resize_cllist(&pfd, &cl_list, cl_size, pfdcount);
					cl_list[pfdcount] = cl;
					pfd[pfdcount].fd = cl->pfd;
					pfd[pfdcount++].events = POLLIN | POLLPRI | POLLHUP;
				}
			}
		}

		//server (new tcp connections or udp messages)
		for (k = 0; k < CS_MAX_MOD; k++) {
			struct s_module *module = &modules[k];
			if ((module->type & MOD_CONN_NET)) {
				for (j = 0; j < module->ptab.nports; j++) {
					if (module->ptab.ports[j].fd) {
						cl_size = chk_resize_cllist(&pfd, &cl_list, cl_size, pfdcount);
						cl_list[pfdcount] = NULL;
						pfd[pfdcount].fd = module->ptab.ports[j].fd;
						pfd[pfdcount++].events = POLLIN | POLLPRI | POLLHUP;
					}
				}
			}
		}

		if (pfdcount >= 1024)
			cs_log("WARNING: too many users!");

		rc = poll(pfd, pfdcount, 5000);

		if (rc<1)
			continue;

		for (i=0; i<pfdcount; i++) {
			//clients
			cl = cl_list[i];
			if (cl && !is_valid_client(cl))
				continue;

			if (pfd[i].fd == thread_pipe[0] && (pfd[i].revents & (POLLIN | POLLPRI))) {
				// a thread ended and cl->pfd should be added to pollfd list again (thread_active==0)
				if(read(thread_pipe[0], buf, sizeof(buf)) == -1){
					cs_debug_mask(D_TRACE, "Reading from pipe failed (errno=%d %s)", errno, strerror(errno));
				}
				continue;
			}

			//clients
			// message on an open tcp connection
			if (cl && cl->init_done && cl->pfd && (cl->typ == 'c' || cl->typ == 'm')) {
				if (pfd[i].fd == cl->pfd && (pfd[i].revents & (POLLHUP | POLLNVAL))) {
					//client disconnects
					kill_thread(cl);
					continue;
				}
				if (pfd[i].fd == cl->pfd && (pfd[i].revents & (POLLIN | POLLPRI))) {
					add_job(cl, ACTION_CLIENT_TCP, NULL, 0);
				}
			}


			//reader
			// either an ecm answer, a keepalive or connection closed from a proxy
			// physical reader ('r') should never send data without request
			rdr = NULL;
			struct s_client *cl2 = NULL;
			if (cl && cl->typ == 'p'){
				rdr = cl->reader;
				if(rdr)
					cl2 = rdr->client;
			}

			if (rdr && cl2 && cl2->init_done) {
				if (cl2->pfd && pfd[i].fd == cl2->pfd && (pfd[i].revents & (POLLHUP | POLLNVAL))) {
					//connection to remote proxy was closed
					//oscam should check for rdr->tcp_connected and reconnect on next ecm request sent to the proxy
					network_tcp_connection_close(rdr, "closed");
					rdr_debug_mask(rdr, D_READER, "connection closed");
				}
				if (cl2->pfd && pfd[i].fd == cl2->pfd && (pfd[i].revents & (POLLIN | POLLPRI))) {
					add_job(cl2, ACTION_READER_REMOTE, NULL, 0);
				}
			}


			//server sockets
			// new connection on a tcp listen socket or new message on udp listen socket
			if (!cl && (pfd[i].revents & (POLLIN | POLLPRI))) {
				for (k = 0; k < CS_MAX_MOD; k++) {
					struct s_module *module = &modules[k];
					if ((module->type & MOD_CONN_NET)) {
						for (j = 0; j < module->ptab.nports; j++) {
							if (module->ptab.ports[j].fd && module->ptab.ports[j].fd == pfd[i].fd) {
								accept_connection(module, k, j);
							}
						}
					}
				}
			}
		}
		first_client->last=time((time_t *)0);
	}
	free(pfd);
	free(cl_list);
	return;
}
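
chk_resize_cllist() is not shown here, but its call sites fix its contract: keep pfd[] and cl_list[] allocated in lockstep, grow both once pfdcount reaches the current capacity, and return the (possibly new) capacity. A hypothetical sketch of that contract, not OSCam's actual implementation:

#include <poll.h>
#include <stdint.h>
#include <stdlib.h>

struct s_client; /* opaque here */

static uint32_t resize_cllist(struct pollfd **pfd, struct s_client ***cl_list,
                              uint32_t cur_size, uint32_t needed)
{
    if (cur_size == 0) {                  /* first call: start from empty buffers */
        *pfd = NULL;
        *cl_list = NULL;
    } else if (needed < cur_size) {
        return cur_size;                  /* still room, keep the buffers */
    }
    uint32_t new_size = needed + 64;      /* grow with some headroom */
    struct pollfd *p = realloc(*pfd, new_size * sizeof(*p));
    struct s_client **c = realloc(*cl_list, new_size * sizeof(*c));
    if (!p || !c)
        exit(1);                          /* mirrors the fatal style above */
    *pfd = p;
    *cl_list = c;
    return new_size;
}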
Example No. 6
static int32_t ghttp_recv_chk(struct s_client *client, uchar *dcw, int32_t *rc, uchar *buf, int32_t UNUSED(n))
{
  char* data;
  char* lenstr;
  int rcode, len = 0;
  s_ghttp* context = (s_ghttp*)client->ghttp;
  ECM_REQUEST *er = &context->last_ecm;

  data = strstr((char*)buf, "HTTP/1.1");
  if(!data) {
    cs_debug_mask(D_CLIENT, "%s: non http or otherwise corrupt response/disconnect: %s", client->reader->label, buf);     
    network_tcp_connection_close(client->reader, "receive error or idle timeout");
    return -1;
  }
  data = data + strlen("HTTP/1.1 ");
  rcode = atoi(data);
  if(rcode < 200 || rcode > 204) {
    cs_debug_mask(D_CLIENT, "%s: http error code %d", client->reader->label, rcode);    
    data = strstr(data, "Content-Type: application/octet-stream"); // if not octet-stream, google error. need reconnect?
    if(data) // we have error info string in data
    { 
      lenstr = strstr((char*)buf, "Content-Length: ");
      if(lenstr) {
        lenstr = lenstr + strlen("Content-Length: ");
        len = atoi(lenstr);
      }
      data = strstr(data, "\r\n\r\n");
      if(data) { // check before advancing: NULL + 4 is never NULL, so the old test could not fail
        data += 4;
        data[len] = '\0';
        cs_debug_mask(D_CLIENT, "%s: http error message: %s", client->reader->label, data);
      }
    }
    if(rcode == 503) {
      context->prev_sid = 0;
      if(context->do_post_next) {
        cs_debug_mask(D_CLIENT, "%s: recv_chk got 503 despite post, trying reconnect", client->reader->label);        
        network_tcp_connection_close(client->reader, "timeout");
        return -1;
      } else {
        // on 503 timeout, switch to POST
        context->do_post_next = 1;
        cs_debug_mask(D_CLIENT, "%s: recv_chk got 503, trying direct post", client->reader->label);         
        _ghttp_post_ecmdata(client, er);

        *rc = 0;
        memset(dcw, 0, 16);
        return -1;
      }
    } else if(rcode == 401) {
      cs_debug_mask(D_CLIENT, "%s: session expired, trying direct post", client->reader->label); 
      context->do_post_next = 1;
      NULLFREE(context->session_id);
      _ghttp_post_ecmdata(client, er);

      *rc = 0;
      memset(dcw, 0, 16);
      return -1;
    }
    return -1;
  }

  // switch back to cache get after rapid ecm response (arbitrary atm), only effect is a slight bw save for client
  if(context->do_post_next && context->last_ecm.srvid == context->prev_sid) {
    if(client->cwlastresptime > 0 && client->cwlastresptime < 800) {
      cs_debug_mask(D_CLIENT, "%s: prev resp time for same sid was %d ms, switching back to cache get for next req", client->reader->label, client->cwlastresptime); 
      context->do_post_next = 0;
    }
  }

  data = strstr((char*)buf, "Set-Cookie: GSSID=");
  if(data) {
    data += strlen("Set-Cookie: GSSID=");
    NULLFREE(context->session_id);
    if(cs_malloc(&context->session_id, 7)) { // TODO: don't assume a session id of length 6
      strncpy((char*)context->session_id, data, 6);
      context->session_id[6] = '\0';
      cs_debug_mask(D_CLIENT, "%s: set session_id to: %s", client->reader->label, context->session_id);
    }
  }

  data = strstr((char*)buf, "Content-Length: 16");
  if(data) {
    data = strstr((char*)buf, "\r\n\r\n");
    data += 4;
    memcpy(dcw, data, 16);
    *rc = 1;
    char tmp_dbg[33];
    cs_debug_mask(D_CLIENT, "%s: recv chk - %s", client->reader->label, cs_hexdump(0, dcw, 16, tmp_dbg, sizeof (tmp_dbg)));
    return client->reader->msg_idx;
  } else {
    cs_debug_mask(D_CLIENT, "%s: recv_chk fail!", client->reader->label);
  }
  return -1;
}
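
Example 10 later replaces these bare strstr() chains with small helpers (_get_int_header(), _get_header_substr()). A minimal sketch of the safer shape, hypothetical code rather than the module's own:

#include <stdlib.h>
#include <string.h>

/* Returns the integer following `name` in buf, or -1 when the header is
   absent, instead of dereferencing strstr(...) + n without a NULL check. */
static int http_int_header(const char *buf, const char *name)
{
    const char *p = strstr(buf, name);
    return p ? atoi(p + strlen(name)) : -1;
}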
Example No. 7
void *work_thread(void *ptr)
{
	struct job_data *data = (struct job_data *)ptr;
	struct s_client *cl = data->cl;
	struct s_reader *reader = cl->reader;
	struct timeb start, end;  // start time poll, end time poll

	struct job_data tmp_data;
	struct pollfd pfd[1];

	pthread_setspecific(getclient, cl);
	cl->thread = pthread_self();
	cl->thread_active = 1;

	set_work_thread_name(data);

	struct s_module *module = get_module(cl);
	uint16_t bufsize = module->bufsize; //CCCam needs more than 1024bytes!
	if(!bufsize)
		{ bufsize = 1024; }

	uint8_t *mbuf;
	if(!cs_malloc(&mbuf, bufsize))
		{ return NULL; }
	cl->work_mbuf = mbuf; // Track locally allocated data, because some callback may call cs_exit/cs_disconnect_client/pthread_exit and then mbuf would be leaked
	int32_t n = 0, rc = 0, i, idx, s;
	uint8_t dcw[16];
	int8_t restart_reader = 0;
	while(cl->thread_active)
	{
		cs_ftime(&start); // register start time
		while(cl->thread_active)
		{
			if(!cl || cl->kill || !is_valid_client(cl))
			{
				pthread_mutex_lock(&cl->thread_lock);
				cl->thread_active = 0;
				pthread_mutex_unlock(&cl->thread_lock);
				cs_debug_mask(D_TRACE, "ending thread (kill)");
				__free_job_data(cl, data);
				cl->work_mbuf = NULL; // Prevent free_client from freeing mbuf (->work_mbuf)
				free_client(cl);
				if(restart_reader)
					{ restart_cardreader(reader, 0); }
				NULLFREE(mbuf);
				pthread_exit(NULL);
				return NULL;
			}

			if(data && data->action != ACTION_READER_CHECK_HEALTH)
				{ cs_debug_mask(D_TRACE, "data from add_job action=%d client %c %s", data->action, cl->typ, username(cl)); }

			if(!data)
			{
				if(!cl->kill && cl->typ != 'r')
					{ client_check_status(cl); } // do not call for physical readers as this might cause an endless job loop
				pthread_mutex_lock(&cl->thread_lock);
				if(cl->joblist && ll_count(cl->joblist) > 0)
				{
					LL_ITER itr = ll_iter_create(cl->joblist);
					data = ll_iter_next_remove(&itr);
					if(data)
						{ set_work_thread_name(data); }
					//cs_debug_mask(D_TRACE, "start next job from list action=%d", data->action);
				}
				pthread_mutex_unlock(&cl->thread_lock);
			}

			if(!data)
			{
				/* for serial client cl->pfd is file descriptor for serial port not socket
				   for example: pfd=open("/dev/ttyUSB0"); */
				if(!cl->pfd || module->listenertype == LIS_SERIAL)
					{ break; }
				pfd[0].fd = cl->pfd;
				pfd[0].events = POLLIN | POLLPRI;

				pthread_mutex_lock(&cl->thread_lock);
				cl->thread_active = 2;
				pthread_mutex_unlock(&cl->thread_lock);
				rc = poll(pfd, 1, 3000);
				pthread_mutex_lock(&cl->thread_lock);
				cl->thread_active = 1;
				pthread_mutex_unlock(&cl->thread_lock);
				if(rc > 0)
				{
					cs_ftime(&end); // register end time
					cs_debug_mask(D_TRACE, "[OSCAM-WORK] new event %d occurred on fd %d after %"PRId64" ms inactivity", pfd[0].revents,
								  pfd[0].fd, comp_timeb(&end, &start));
					data = &tmp_data;
					data->ptr = NULL;
					cs_ftime(&start); // register start time for new poll next run

					if(reader)
						{ data->action = ACTION_READER_REMOTE; }
					else
					{
						if(cl->is_udp)
						{
							data->action = ACTION_CLIENT_UDP;
							data->ptr = mbuf;
							data->len = bufsize;
						}
						else
							{ data->action = ACTION_CLIENT_TCP; }
						if(pfd[0].revents & (POLLHUP | POLLNVAL | POLLERR))
							{ cl->kill = 1; }
					}
				}
			}

			if(!data)
				{ continue; }

			if(!reader && data->action < ACTION_CLIENT_FIRST)
			{
				__free_job_data(cl, data);
				break;
			}

			if(!data->action)
				{ break; }

			struct timeb actualtime;
			cs_ftime(&actualtime);
			int32_t gone = comp_timeb(&actualtime, &data->time);
			if(data != &tmp_data && gone > (int) cfg.ctimeout+1000)
			{
				cs_debug_mask(D_TRACE, "dropping client data for %s time %dms", username(cl), gone);
				__free_job_data(cl, data);
				continue;
			}

			if(data != &tmp_data)
				{ cl->work_job_data = data; } // Track the current job_data
			switch(data->action)
			{
			case ACTION_READER_IDLE:
				reader_do_idle(reader);
				break;
			case ACTION_READER_REMOTE:
				s = check_fd_for_data(cl->pfd);
				if(s == 0)  // no data, another thread already read from fd?
					{ break; }
				if(s < 0)
				{
					if(reader->ph.type == MOD_CONN_TCP)
						{ network_tcp_connection_close(reader, "disconnect"); }
					break;
				}
				rc = reader->ph.recv(cl, mbuf, bufsize);
				if(rc < 0)
				{
					if(reader->ph.type == MOD_CONN_TCP)
						{ network_tcp_connection_close(reader, "disconnect on receive"); }
					break;
				}
				cl->last = time(NULL); // *********************************** TO BE REPLACED BY CS_FTIME() LATER ****************
				idx = reader->ph.c_recv_chk(cl, dcw, &rc, mbuf, rc);
				if(idx < 0) { break; }  // no dcw received
				if(!idx) { idx = cl->last_idx; }
				reader->last_g = time(NULL); // *********************************** TO BE REPLACED BY CS_FTIME() LATER **************** // for reconnect timeout
				for(i = 0, n = 0; i < cfg.max_pending && n == 0; i++)
				{
					if(cl->ecmtask[i].idx == idx)
					{
						cl->pending--;
						casc_check_dcw(reader, i, rc, dcw);
						n++;
					}
				}
				break;
			case ACTION_READER_RESET:
				cardreader_do_reset(reader);
				break;
			case ACTION_READER_ECM_REQUEST:
				reader_get_ecm(reader, data->ptr);
				break;
			case ACTION_READER_EMM:
				reader_do_emm(reader, data->ptr);
				break;
			case ACTION_READER_CARDINFO:
				reader_do_card_info(reader);
				break;
			case ACTION_READER_INIT:
				if(!cl->init_done)
					{ reader_init(reader); }
				break;
			case ACTION_READER_RESTART:
				cl->kill = 1;
				restart_reader = 1;
				break;
			case ACTION_READER_RESET_FAST:
				reader->card_status = CARD_NEED_INIT;
				cardreader_do_reset(reader);
				break;
			case ACTION_READER_CHECK_HEALTH:
				cardreader_do_checkhealth(reader);
				break;
			case ACTION_READER_CAPMT_NOTIFY:
				if(reader->ph.c_capmt) { reader->ph.c_capmt(cl, data->ptr); }
				break;
			case ACTION_CLIENT_UDP:
				n = module->recv(cl, data->ptr, data->len);
				if(n < 0) { break; }
				module->s_handler(cl, data->ptr, n);
				break;
			case ACTION_CLIENT_TCP:
				s = check_fd_for_data(cl->pfd);
				if(s == 0)  // no data, another thread already read from fd?
					{ break; }
				if(s < 0)    // system error or fd wants to be closed
				{
					cl->kill = 1; // kill client on next run
					continue;
				}
				n = module->recv(cl, mbuf, bufsize);
				if(n < 0)
				{
					cl->kill = 1; // kill client on next run
					continue;
				}
				module->s_handler(cl, mbuf, n);
				break;
			case ACTION_CACHEEX_TIMEOUT:
#ifdef CS_CACHEEX
				cacheex_timeout(data->ptr);
#endif
				break;
			case ACTION_FALLBACK_TIMEOUT:
				fallback_timeout(data->ptr);
				break;
			case ACTION_CLIENT_TIMEOUT:
				ecm_timeout(data->ptr);
				break;
			case ACTION_ECM_ANSWER_READER:
				chk_dcw(data->ptr);
				break;
			case ACTION_ECM_ANSWER_CACHE:
				write_ecm_answer_fromcache(data->ptr);
				break;
			case ACTION_CLIENT_INIT:
				if(module->s_init)
					{ module->s_init(cl); }
				cl->is_udp = module->type == MOD_CONN_UDP;
				cl->init_done = 1;
				break;
			case ACTION_CLIENT_IDLE:
				if(module->s_idle)
					{ module->s_idle(cl); }
				else
				{
					cs_log("user %s reached %d sec idle limit.", username(cl), cfg.cmaxidle);
					cl->kill = 1;
				}
				break;
			case ACTION_CACHE_PUSH_OUT:
			{
#ifdef CS_CACHEEX
				ECM_REQUEST *er = data->ptr;
				int32_t res = 0, stats = -1;
				// cc-nodeid-list-check
				if(reader)
				{
					if(reader->ph.c_cache_push_chk && !reader->ph.c_cache_push_chk(cl, er))
						{ break; }
					res = reader->ph.c_cache_push(cl, er);
					stats = cacheex_add_stats(cl, er->caid, er->srvid, er->prid, 0);
				}
				else
				{
					if(module->c_cache_push_chk && !module->c_cache_push_chk(cl, er))
						{ break; }
					res = module->c_cache_push(cl, er);
				}
				debug_ecm(D_CACHEEX, "pushed ECM %s to %s res %d stats %d", buf, username(cl), res, stats);
				cl->cwcacheexpush++;
				if(cl->account)
					{ cl->account->cwcacheexpush++; }
				first_client->cwcacheexpush++;
#endif
				break;
			}
			case ACTION_CLIENT_KILL:
				cl->kill = 1;
				break;
			case ACTION_CLIENT_SEND_MSG:
			{
#ifdef MODULE_CCCAM
				struct s_clientmsg *clientmsg = (struct s_clientmsg *)data->ptr;
				cc_cmd_send(cl, clientmsg->msg, clientmsg->len, clientmsg->cmd);
#endif
				break;
			}
			} // switch

			__free_job_data(cl, data);
		}

		if(thread_pipe[1] && (mbuf[0] != 0x00))
		{
			cs_ddump_mask(D_TRACE, mbuf, 1, "[OSCAM-WORK] Write to pipe:");
			if(write(thread_pipe[1], mbuf, 1) == -1)    // wakeup client check
			{
				cs_debug_mask(D_TRACE, "[OSCAM-WORK] Writing to pipe failed (errno=%d %s)", errno, strerror(errno));
			}
		}

		// Check for some race condition where while we ended, another thread added a job
		pthread_mutex_lock(&cl->thread_lock);
		if(cl->joblist && ll_count(cl->joblist) > 0)
		{
			pthread_mutex_unlock(&cl->thread_lock);
			continue;
		}
		else
		{
			cl->thread_active = 0;
			pthread_mutex_unlock(&cl->thread_lock);
			break;
		}
	}
	cl->thread_active = 0;
	cl->work_mbuf = NULL; // Prevent free_client from freeing mbuf (->work_mbuf)
	NULLFREE(mbuf);
	pthread_exit(NULL);
	return NULL;
}
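
The idle measurements above lean on cs_ftime()/comp_timeb(). Judging by the PRId64 format in the debug line, comp_timeb(end, start) returns the elapsed time in milliseconds as a 64-bit value; an approximate reimplementation, not the project's own definition:

#include <stdint.h>
#include <sys/timeb.h>

static int64_t elapsed_ms(const struct timeb *end, const struct timeb *start)
{
    return (int64_t)(end->time - start->time) * 1000
           + (end->millitm - start->millitm);
}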
Example No. 8
void free_client(struct s_client *cl)
{
    if (!cl)
        return;
    struct s_reader *rdr = cl->reader;

    // Remove client from the client list. kill_thread also removes this client, so this path only runs when a client exits on its own...
    struct s_client *prev, *cl2;
    cs_writelock(&clientlist_lock);
    cl->kill = 1;
    for (prev = first_client, cl2 = first_client->next;
            prev->next != NULL;
            prev = prev->next, cl2 = cl2->next)
    {
        if (cl == cl2)
            break;
    }
    if (cl == cl2)
        prev->next = cl2->next; // Remove client from list
    int32_t bucket = (uintptr_t)cl / 16 % CS_CLIENT_HASHBUCKETS;
    // Remove client from hashed list
    if (first_client_hashed[bucket] == cl) {
        first_client_hashed[bucket] = cl->nexthashed;
    } else {
        for (prev = first_client_hashed[bucket], cl2 = first_client_hashed[bucket]->nexthashed;
                prev->nexthashed != NULL;
                prev = prev->nexthashed, cl2 = cl2->nexthashed)
        {
            if (cl == cl2)
                break;
        }
        if (cl == cl2)
            prev->nexthashed = cl2->nexthashed;
    }
    cs_writeunlock(&clientlist_lock);

    // Clean reader. The cleaned structures should only be used by the reader thread, so we should be safe without waiting
    if (rdr) {
        remove_reader_from_ecm(rdr);
        remove_reader_from_active(rdr);
        if(rdr->ph.cleanup)
            rdr->ph.cleanup(cl);
        if (cl->typ == 'r')
            cardreader_close(rdr);
        if (cl->typ == 'p')
            network_tcp_connection_close(rdr, "cleanup");
        cl->reader = NULL;
    }

    // Clean client specific data
    if (cl->typ == 'c') {
        cs_statistics(cl);
        cl->last_caid = 0xFFFF;
        cl->last_srvid = 0xFFFF;
        cs_statistics(cl);
        cs_sleepms(500); //just wait a bit that really really nobody is accessing client data
        struct s_module *module = get_module(cl);
        if (module->cleanup)
            module->cleanup(cl);
    }

    // Close network socket if not already cleaned by previous cleanup functions
    if (cl->pfd)
        close(cl->pfd);

    // Clean all remaining structures
    free_joblist(cl);

    cleanup_ecmtasks(cl);
    add_garbage(cl->emmcache);
#ifdef MODULE_CCCAM
    add_garbage(cl->cc);
#endif
#ifdef MODULE_SERIAL
    add_garbage(cl->serialdata);
#endif
    add_garbage(cl);
}
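
The bucket index above hashes the client's own address: heap pointers are aligned, so the low bits carry no entropy, and dividing by 16 discards them before the modulo. A self-contained sketch, with the bucket count assumed for illustration:

#include <stdint.h>

#define CS_CLIENT_HASHBUCKETS 32 /* assumed value, for illustration only */

static int32_t client_bucket(const void *cl)
{
    return (int32_t)((uintptr_t)cl / 16 % CS_CLIENT_HASHBUCKETS);
}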
Example No. 9
int32_t ghttp_client_init(struct s_client *cl)
{
	int32_t handle;
	char *str = NULL;

	if(cl->reader->r_port == 0)
		{ cl->reader->r_port = cl->reader->ghttp_use_ssl ? 443 : 80; }

	str = strstr(cl->reader->device, ".");
	if(!str)
	{
		char host[128];
		cs_strncpy(host, cl->reader->device, sizeof(host));
		snprintf(cl->reader->device, sizeof(cl->reader->device), "%s.appspot.com", host);
	}

	cs_log("%s: init google cache client %s:%d (fd=%d)", cl->reader->label, cl->reader->device, cl->reader->r_port, cl->udp_fd);

	if(cl->udp_fd) { network_tcp_connection_close(cl->reader, "re-init"); }

	handle = network_tcp_connection_open(cl->reader);
	if(handle < 0) { return -1; }

	cl->reader->tcp_connected = 2;
	cl->reader->card_status = CARD_INSERTED;
	cl->reader->last_g = cl->reader->last_s = time((time_t *)0);

	cl->pfd = cl->udp_fd;

	if(!cl->ghttp)
	{
		if(!cs_malloc(&(cl->ghttp), sizeof(s_ghttp))) { return -1; }
		memset(cl->ghttp, 0, sizeof(s_ghttp));
		((s_ghttp *)cl->ghttp)->post_contexts = ll_create("post contexts");
		((s_ghttp *)cl->ghttp)->ecm_q = ll_create("ecm queue");
	}
	else
	{
		ll_clear(((s_ghttp *)cl->ghttp)->ecm_q);
	}

	if(cl->reader->ghttp_use_ssl)
	{
#ifndef WITH_SSL
		cs_log("%s: use_ssl set but no ssl support available, aborting...", cl->reader->label);
		return -1;
#else
		if(ghttp_ssl_context == NULL) { return -1; }

		if(_ssl_connect(cl, handle))
		{
			cl->crypted = 1;
		}
		else
		{
			network_tcp_connection_close(cl->reader, "ssl failed");
			return -1;
		}
#endif
	}

	return 0;
}
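
The device-name block above defaults a bare Google App Engine app id to its public hostname. A standalone sketch of the same normalization, with the 128-byte size of the reader's device buffer made an explicit assumption:

#include <stdio.h>
#include <string.h>

/* "myapp" becomes "myapp.appspot.com"; names already containing a dot
   are left untouched. dev is assumed to be a 128-byte buffer. */
static void default_ghttp_host(char dev[128])
{
    if (!strchr(dev, '.')) {
        char host[128];
        snprintf(host, sizeof(host), "%s", dev);
        snprintf(dev, 128, "%s.appspot.com", host);
    }
}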
Example No. 10
static int32_t ghttp_recv_chk(struct s_client *client, uchar *dcw, int32_t *rc, uchar *buf, int32_t n)
{
	char *data;
	char *hdrstr;
	uchar *content;
	int rcode, len, clen = 0;
	s_ghttp *context = (s_ghttp *)client->ghttp;
	ECM_REQUEST *er = NULL;

	if(n < 5) { return -1; }

	data = strstr((char *)buf, "HTTP/1.1 ");
	if(!data || ll_count(context->ecm_q) > 6)
	{
		cs_debug_mask(D_CLIENT, "%s: non http or otherwise corrupt response: %s", client->reader->label, buf);
		cs_ddump_mask(D_CLIENT, buf, n, "%s: ", client->reader->label);
		network_tcp_connection_close(client->reader, "receive error");
		NULLFREE(context->session_id);
		ll_clear(context->ecm_q);
		return -1;
	}

	LL_ITER itr = ll_iter_create(context->ecm_q);
	er = (ECM_REQUEST *)ll_iter_next(&itr);

	rcode = _get_int_header(buf, "HTTP/1.1 ");
	clen = _get_int_header(buf, "Content-Length: ");

	data = strstr(data, "\r\n\r\n"); // bail out on truncated headers instead of computing NULL + 4
	if(!data)
		{ return -1; }
	content = (uchar *)(data + 4);

	hdrstr = _get_header_substr(buf, "ETag: \"", "\"\r\n");
	if(hdrstr)
	{
		NULLFREE(context->host_id);
		context->host_id = (uchar *)hdrstr;
		cs_debug_mask(D_CLIENT, "%s: new name: %s", client->reader->label, context->host_id);
		len = b64decode(context->host_id);
		if(len == 0 || len >= 64)
		{
			NULLFREE(context->host_id);
		}
		else
		{
			cs_debug_mask(D_CLIENT, "%s: redirected...", client->reader->label);
			NULLFREE(context->session_id);
			ll_clear_data(ghttp_ignored_contexts);
			ll_clear(context->ecm_q);
			return -1;
		}
	}

	hdrstr = _get_header_substr(buf, "ETag: W/\"", "\"\r\n");
	if(hdrstr)
	{
		NULLFREE(context->fallback_id);
		context->fallback_id = (uchar *)hdrstr;
		cs_debug_mask(D_CLIENT, "%s: new fallback name: %s", client->reader->label, context->fallback_id);
		len = b64decode(context->fallback_id);
		if(len == 0 || len >= 64)
		{
			NULLFREE(context->fallback_id);
		}
	}

	hdrstr = _get_header(buf, "Set-Cookie: GSSID=");
	if(hdrstr)
	{
		NULLFREE(context->session_id);
		context->session_id = (uchar *)hdrstr;
		cs_debug_mask(D_CLIENT, "%s: set session_id to: %s", client->reader->label, context->session_id);
	}

	// buf[n] = '\0';
	// cs_ddump_mask(D_TRACE, content, clen, "%s: reply\n%s", client->reader->label, buf);

	if(rcode < 200 || rcode > 204)
	{
		cs_debug_mask(D_CLIENT, "%s: http error code %d", client->reader->label, rcode);
		data = strstr((char *)buf, "Content-Type: application/octet-stream"); // if not octet-stream, google error. need reconnect?
		if(data)    // we have error info string in the post content
		{
			if(clen > 0)
			{
				content[clen] = '\0';
				cs_debug_mask(D_CLIENT, "%s: http error message: %s", client->reader->label, content);
			}
		}
		if(rcode == 503)
		{
			if(er && _is_post_context(context->post_contexts, er, false))
			{
				if(_swap_hosts(context))
				{
					cs_debug_mask(D_CLIENT, "%s: switching to fallback", client->reader->label);
				}
				else
				{
					cs_debug_mask(D_CLIENT, "%s: recv_chk got 503 despite post, trying reconnect", client->reader->label);
					network_tcp_connection_close(client->reader, "reconnect");
					ll_clear(context->ecm_q);
				}
			}
			else
			{
				// on 503 cache timeout, retry with POST immediately (and switch to POST for subsequent)
				if(er)
				{
					_set_pid_status(context->post_contexts, er->onid, er->tsid, er->srvid, 0);
					cs_debug_mask(D_CLIENT, "%s: recv_chk got 503, trying direct post", client->reader->label);
					_ghttp_post_ecmdata(client, er);
				}
			}
		}
		else if(rcode == 401)
		{
			NULLFREE(context->session_id);
			if(er)
			{
				cs_debug_mask(D_CLIENT, "%s: session expired, trying direct post", client->reader->label);
				_ghttp_post_ecmdata(client, er);
			}
		}
		else if(rcode == 403)
		{
			client->reader->enable = 0;
			network_tcp_connection_close(client->reader, "login failure");
			ll_clear(context->ecm_q);
			cs_log("%s: invalid username/password, disabling reader.", client->reader->label);
		}

		// not sure if this is needed on failure, copied from newcamd
		*rc = 0;
		memset(dcw, 0, 16);

		return -1;
	}

	// successful http reply (200 ok or 204 no content)

	hdrstr = _get_header(buf,  "Pragma: context-ignore=");
	if(hdrstr)
	{
		if(clen > 1)
		{
			cs_ddump_mask(D_CLIENT, content, clen, "%s: pmt ignore reply - %s (%d pids)", client->reader->label, hdrstr, clen / 2);
			uint32_t onid = 0, tsid = 0, sid = 0;
			if(sscanf(hdrstr, "%4x-%4x-%4x", &onid, &tsid, &sid) == 3)
				{ _set_pids_status(ghttp_ignored_contexts, onid, tsid, sid, content, clen); }
			NULLFREE(hdrstr);
			return -1;
		}
		NULLFREE(hdrstr);
	}

	data = strstr((char *)buf, "Pragma: context-ignore-clear");
	if(data)
	{
		cs_debug_mask(D_CLIENT, "%s: clearing local ignore list (size %d)", client->reader->label, ll_count(ghttp_ignored_contexts));
		ll_clear_data(ghttp_ignored_contexts);
	}

	// switch back to cache get after rapid ecm response (arbitrary atm), only effect is a slight bw save for client
	if(!er || _is_post_context(context->post_contexts, er, false))
	{
		data = strstr((char *)buf, "Pragma: cached");
		if(data || (client->cwlastresptime > 0 && client->cwlastresptime < 640))
		{
			cs_debug_mask(D_CLIENT, "%s: probably cached cw (%d ms), switching back to cache get for next req", client->reader->label, client->cwlastresptime);
			if(er) { _is_post_context(context->post_contexts, er, true); }
		}
	}

	if(clen == 16)    // cw in content
	{
		memcpy(dcw, content, 16);
		*rc = 1;
		er = ll_remove_first(context->ecm_q);
		if(!er) { return -1; }
		cs_ddump_mask(D_TRACE, dcw, 16, "%s: cw recv chk for idx %d", client->reader->label, er->idx);
		return er->idx;
	}
	else
	{
		if(clen != 0) { cs_ddump_mask(D_CLIENT, content, clen, "%s: recv_chk fail, clen = %d", client->reader->label, clen); }
	}
	return -1;
}
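
The helper functions this revision relies on are not shown, but their uses fix the contracts: _get_header_substr(buf, pre, post) must return a heap-allocated copy of the text between the two markers (hence the NULLFREE() calls on its results), or NULL when absent. A hypothetical sketch of that contract:

#include <stdlib.h>
#include <string.h>

static char *header_substr(const char *buf, const char *pre, const char *post)
{
    const char *s = strstr(buf, pre);
    if (!s)
        return NULL;
    s += strlen(pre);
    const char *e = strstr(s, post);
    return e ? strndup(s, (size_t)(e - s)) : NULL; /* strndup is POSIX.1-2008 */
}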
Example No. 11
void free_client(struct s_client *cl)
{
	if(!cl)
		{ return; }
	struct s_reader *rdr = cl->reader;

	// Remove client from the client list. kill_thread also removes this client, so this path only runs when a client exits on its own...
	struct s_client *prev, *cl2;
	cs_writelock(__func__, &clientlist_lock);
	if(!cl->kill_started)
	{
		cl->kill_started = 1;
	}
	else
	{
		cs_writeunlock(__func__, &clientlist_lock);
		cs_log("[free_client] ERROR: free already started!");
		return;
	}
	cl->kill = 1;
	for(prev = first_client, cl2 = first_client->next;
			prev->next != NULL;
			prev = prev->next, cl2 = cl2->next)
	{
		if(cl == cl2)
			{ break; }
	}
	if(cl == cl2)
		{ prev->next = cl2->next; } // Remove client from list
	int32_t bucket = (uintptr_t)cl / 16 % CS_CLIENT_HASHBUCKETS;
	// Remove client from hashed list
	if(first_client_hashed[bucket] == cl)
	{
		first_client_hashed[bucket] = cl->nexthashed;
	}
	else
	{
		for(prev = first_client_hashed[bucket], cl2 = first_client_hashed[bucket]->nexthashed;
				prev->nexthashed != NULL;
				prev = prev->nexthashed, cl2 = cl2->nexthashed)
		{
			if(cl == cl2)
				{ break; }
		}
		if(cl == cl2)
			{ prev->nexthashed = cl2->nexthashed; }
	}
	cs_writeunlock(__func__, &clientlist_lock);

	cleanup_ecmtasks(cl);

	// Clean reader. The cleaned structures should only be used by the reader thread, so we should be safe without waiting
	if(rdr)
	{
		ll_destroy_data(&rdr->emmstat);
		remove_reader_from_active(rdr);

		cs_sleepms(1000); //just wait a bit that really really nobody is accessing client data

		if(rdr->ph.cleanup)
			{ rdr->ph.cleanup(cl); }
		if(cl->typ == 'r')
			{ cardreader_close(rdr); }
		if(cl->typ == 'p')
			{ network_tcp_connection_close(rdr, "cleanup"); }
		cl->reader = NULL;
	}

	// Clean client specific data
	if(cl->typ == 'c')
	{
		cs_statistics(cl);
		cl->last_caid = NO_CAID_VALUE;
		cl->last_provid = NO_PROVID_VALUE;
		cl->last_srvid = NO_SRVID_VALUE;
		cs_statistics(cl);

		cs_sleepms(1000); //just wait a bit that really really nobody is accessing client data
	}

	struct s_module *module = get_module(cl);
	if(module->cleanup)
		{ module->cleanup(cl); }

	// Close network socket if not already cleaned by previous cleanup functions
	if(cl->pfd)
		{ close(cl->pfd); }

	// Clean all remaining structures
	free_joblist(cl);
	NULLFREE(cl->work_mbuf);

	if(cl->ecmtask)
	{
		add_garbage(cl->ecmtask);
		cl->ecmtask = NULL;
	}

	ll_destroy_data(&cl->cascadeusers);

	ftab_clear(&cl->ftab);
	ftab_clear(&cl->fchid);
	tuntab_clear(&cl->ttab);
	caidtab_clear(&cl->ctab);

	NULLFREE(cl->cltab.aclass);
	NULLFREE(cl->cltab.bclass);

	NULLFREE(cl->cw_rass);
	ll_destroy_data(&cl->ra_buf);
	NULLFREE(cl->aes_keys);

#ifdef MODULE_CCCAM
	add_garbage(cl->cc);
#endif
#ifdef MODULE_SERIAL
	add_garbage(cl->serialdata);
#endif
	add_garbage(cl);
}