Example #1
void *write_thread(void *_ts) {
	struct ts *ts = _ts;
	struct packet *packet;

	mode_t umask_val = umask(0);
	dir_perm = (0777 & ~umask_val) | (S_IWUSR | S_IXUSR);

	set_thread_name("tsdump-write");
	while ((packet = queue_get(ts->packet_queue))) {
		if (!packet->data_len)
			continue;

		p_dbg1(" - Got packet %d, size: %u, file_time:%lu packet_time:%lu depth:%d\n",
			packet->num, packet->data_len, ALIGN_DOWN(packet->ts.tv_sec, ts->rotate_secs),
			packet->ts.tv_sec, ts->packet_queue->items);

		handle_files(ts, packet);

		if (ts->output_fd > -1) {
			p_dbg2(" - Writing into fd:%d size:%d file:%s\n", ts->output_fd, packet->data_len, ts->output_filename);
			ssize_t written = write(ts->output_fd, packet->data, packet->data_len);
			if (written != packet->data_len) {
				p_err("Can not write data (fd:%d written %zd of %d file:%s)",
					ts->output_fd, written, packet->data_len, ts->output_filename);
			}
		}
		free_packet(packet);
	}
	close_output_file(ts, NO_UNLINK);
	return NULL;
}
Example #2
void log_list_thread(void)
{
	char buf[LOG_BUF_SIZE];
	log_running = 1;
	set_thread_name(__func__);
	do
	{
		log_list_queued = 0;
		LL_ITER it = ll_iter_create(log_list);
		struct s_log *log;
		while((log = ll_iter_next_remove(&it)))
		{
			int8_t do_flush = ll_count(log_list) == 0; //flush on writing last element

			cs_strncpy(buf, log->txt, LOG_BUF_SIZE);
			if(log->direct_log)
				{ cs_write_log(buf, do_flush); }
			else
				{ write_to_log(buf, log, do_flush); }
			NULLFREE(log->txt);
			NULLFREE(log);
		}
		if(!log_list_queued)  // The list is empty, sleep until new data comes in and we are woken up
			sleepms_on_cond(&log_thread_sleep_cond_mutex, &log_thread_sleep_cond, 60 * 1000);
	}
	while(log_running);
	ll_destroy(log_list);
	log_list = NULL;
}
Example #3
static void *output_thread(void *arg)
{
	set_thread_name("output_thread");
	while (1)
		sleep(10);
	return NULL;
}
Example #4
void OutputTask::Run()
{
	set_thread_name("OutputTask");

	MatchList ml;
	bool first_matchlist_printed = false;
	std::stringstream sstrm;

	while(m_input_queue.wait_pull(std::move(ml)) != queue_op_status::closed)
	{
		if(first_matchlist_printed && m_output_is_tty)
		{
			// Print a blank line between the match lists (i.e. the groups of matches in one file).
			std::cout << "\n";
		}
		ml.Print(sstrm, m_output_is_tty, m_enable_color, m_print_column);
		std::cout << sstrm.str();
		std::cout.flush();
		sstrm.str(std::string());
		sstrm.clear();
		first_matchlist_printed = true;

		// Count up the total number of matches.
		m_total_matched_lines += ml.GetNumberOfMatchedLines();
	}
}
Example #5
void PipelinedModCodec::process_thread()
{
    set_thread_name(name());
    set_realtime_prio(1);

    while (m_running) {
        Buffer dataIn;
        m_input_queue.wait_and_pop(dataIn);

        if (dataIn.getLength() == 0) {
            break;
        }

        Buffer dataOut;
        dataOut.setLength(dataIn.getLength());

        if (internal_process(&dataIn, &dataOut) == 0) {
            m_running = false;
        }

        m_output_queue.push(std::move(dataOut));
    }

    m_running = false;
}
Example #6
static void * reader_check(void) {
	struct s_client *cl;
	struct s_reader *rdr;
	set_thread_name(__func__);
	pthread_mutex_init(&reader_check_sleep_cond_mutex, NULL);
	pthread_cond_init(&reader_check_sleep_cond, NULL);
	while (!exit_oscam) {
		for (cl=first_client->next; cl ; cl=cl->next) {
			if (!cl->thread_active)
				client_check_status(cl);
		}
		cs_readlock(&readerlist_lock);
		for (rdr=first_active_reader; rdr; rdr=rdr->next) {
			if (rdr->enable) {
				cl = rdr->client;
				if (!cl || cl->kill)
					restart_cardreader(rdr, 0);
				else if (!cl->thread_active)
					client_check_status(cl);
			}
		}
		cs_readunlock(&readerlist_lock);
		sleepms_on_cond(&reader_check_sleep_cond, &reader_check_sleep_cond_mutex, 1000);
	}
	return NULL;
}
Example #7
static void * card_poll(void) {
	struct s_client *cl;
	struct s_reader *rdr;
	pthread_mutex_t card_poll_sleep_cond_mutex;
	SAFE_MUTEX_INIT(&card_poll_sleep_cond_mutex, NULL);
	SAFE_COND_INIT(&card_poll_sleep_cond, NULL);
	set_thread_name(__func__);
	while (!exit_oscam) {
		cs_readlock(__func__, &readerlist_lock);
		for (rdr=first_active_reader; rdr; rdr=rdr->next) {
			if (rdr->enable && rdr->card_status == CARD_INSERTED) {
				cl = rdr->client;
				if (cl && !cl->kill)
					{ add_job(cl, ACTION_READER_POLL_STATUS, 0, 0); }
			}
		}
		cs_readunlock(__func__, &readerlist_lock);
		struct timespec ts;
		struct timeval tv;
		gettimeofday(&tv, NULL);
		ts.tv_sec = tv.tv_sec;
		ts.tv_nsec = tv.tv_usec * 1000;
		ts.tv_sec += 1;
		SAFE_MUTEX_LOCK(&card_poll_sleep_cond_mutex);
		SAFE_COND_TIMEDWAIT(&card_poll_sleep_cond, &card_poll_sleep_cond_mutex, &ts); // sleep on card_poll_sleep_cond
		SAFE_MUTEX_UNLOCK(&card_poll_sleep_cond_mutex);
	}
	return NULL;
}
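The gettimeofday() arithmetic above builds the absolute deadline that a timed condition wait expects. As a minimal sketch (not taken from any of these projects), the same one-second sleep can be written with clock_gettime(), assuming the condition variable uses the default CLOCK_REALTIME clock and that plain pthread calls are acceptable instead of the SAFE_* wrappers:

#include <pthread.h>
#include <time.h>

/* Sketch: block on `cond` for at most `secs` seconds or until it is signalled. */
static void sleep_on_cond_for(pthread_cond_t *cond, pthread_mutex_t *mutex, int secs)
{
	struct timespec deadline;
	clock_gettime(CLOCK_REALTIME, &deadline); /* absolute "now" */
	deadline.tv_sec += secs;                  /* absolute wake-up time */
	pthread_mutex_lock(mutex);
	pthread_cond_timedwait(cond, mutex, &deadline); /* returns on signal or timeout */
	pthread_mutex_unlock(mutex);
}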
Example #8
void subscriber(zsock_t *pipe, void *args)
{
    set_thread_name("subscriber[0]");

    int rc;
    zconfig_t* config = args;
    subscriber_state_t *state = subscriber_state_new(pipe, config, hosts);

    // signal readiness after sockets have been created
    zsock_signal(pipe, 0);

    // subscribe to either all messages, or a subset
    setup_subscriptions(state);

    // set up event loop
    zloop_t *loop = zloop_new();
    assert(loop);
    zloop_set_verbose(loop, 0);

    // setup handler for actor messages
    rc = zloop_reader(loop, state->pipe, actor_command, state);
    assert(rc == 0);

     // setup handler for the sub socket
    rc = zloop_reader(loop, state->sub_socket, read_request_and_forward, state);
    assert(rc == 0);

    // setup handler for the router socket
    rc = zloop_reader(loop, state->router_socket, read_router_request_forward, state);
    assert(rc == 0);

    // setup handler for the pull socket
    rc = zloop_reader(loop, state->pull_socket, read_request_and_forward, state);
    assert(rc == 0);

    // run the loop
    if (!quiet)
        fprintf(stdout, "[I] subscriber: listening\n");

    bool should_continue_to_run = getenv("CPUPROFILE") != NULL;
    do {
        rc = zloop_start(loop);
        should_continue_to_run &= errno == EINTR;
        log_zmq_error(rc, __FILE__, __LINE__);
    } while (should_continue_to_run);

    if (!quiet)
        fprintf(stdout, "[I] subscriber: shutting down\n");

    // shutdown
    subscriber_state_destroy(&state);
    zloop_destroy(&loop);
    assert(loop == NULL);

    if (!quiet)
        fprintf(stdout, "[I] subscriber: terminated\n");
}
Example #9
/*
 Work threads are named like this:
   w[r|c]XX-[rdr->label|client->username]

   w      - work thread prefix
   [r|c]  - depending on whether the action is related to a reader or a client
   XX     - two digit action code from enum actions
   label  - reader label or client username (see username() function)
*/
static void set_work_thread_name(struct job_data *data)
{
	char thread_name[16 + 1];
	snprintf(thread_name, sizeof(thread_name), "w%c%02d-%s",
			 data->action < ACTION_CLIENT_FIRST ? 'r' : 'c',
			 data->action,
			 username(data->cl)
			);
	set_thread_name(thread_name);
}
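For illustration, the snprintf() above produces names of the form "w[r|c]XX-label"; the action code 5 and the reader label "upstream" below are hypothetical values, chosen only to show the shape of the result and that it fits the 16+1 byte buffer:

#include <stdio.h>

int main(void)
{
	/* hypothetical values, for illustration only */
	char thread_name[16 + 1];
	snprintf(thread_name, sizeof(thread_name), "w%c%02d-%s", 'r', 5, "upstream");
	printf("%s\n", thread_name); /* prints "wr05-upstream" (13 characters) */
	return 0;
}

Longer labels are silently truncated by snprintf() so the name always fits the buffer.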
Example #10
void InnerUdtServer::accept() {
  std::shared_ptr<InnerUdtConnection> conn = std::make_shared<InnerUdtConnection>();
  std::thread th([this, conn](){
    try {
      set_thread_name("idgs_io");
      this->handle_accept(conn);
    } catch (std::exception &e) {
      LOG(ERROR) << "Get exception in IO thread: " << e.what();
    } catch (...) {
      catchUnknownException();
    }
  });
  th.detach(); // detach the IO thread; destroying a joinable std::thread would call std::terminate()
}
Example #11
static void *camd_thread(void *in_ts) {
	struct ts *ts = in_ts;

	set_thread_name("tsdec-camd");

	while (1) {
		struct camd_msg *msg;
		void *req = queue_get(ts->camd.req_queue); // Waits...
		if (ts->camd_stop)
			break;
		if (!req)
			continue;
		msg = queue_get_nowait(ts->camd.ecm_queue);
		if (!msg)
			msg = queue_get_nowait(ts->camd.emm_queue);
		if (!msg)
			continue;
		camd_do_msg(msg);

		if (ts->camd.ecm_queue->items >= ECM_QUEUE_HARD_LIMIT) {
			ts_LOGf("WRN | Too much items (%d) in ECM queue, dropping the oldest.\n", ts->camd.ecm_queue->items);
			while(ts->camd.ecm_queue->items >= ECM_QUEUE_SOFT_LIMIT) {
				msg = queue_get_nowait(ts->camd.ecm_queue);
				camd_msg_free(&msg);
			}
		}

		if (ts->camd.emm_queue->items >= EMM_QUEUE_HARD_LIMIT) {
			ts_LOGf("WRN | Too much items (%d) in EMM queue, dropping the oldest.%s\n",
				ts->camd.emm_queue->items, ts->camd.ops.proto == CAMD_NEWCAMD ?
				" Consider switching to cs378x protocol!" : "");
			while(ts->camd.emm_queue->items >= EMM_QUEUE_SOFT_LIMIT) {
				msg = queue_get_nowait(ts->camd.emm_queue);
				camd_msg_free(&msg);
			}
		}
	}
	// Flush ECM queue
	while (ts->camd.ecm_queue->items) {
		struct camd_msg *msg = queue_get_nowait(ts->camd.ecm_queue);
		camd_msg_free(&msg);
	}
	// Flush EMM queue
	while (ts->camd.emm_queue->items) {
		struct camd_msg *msg = queue_get_nowait(ts->camd.emm_queue);
		camd_msg_free(&msg);
	}

	pthread_exit(EXIT_SUCCESS);
}
Example #12
static void * oscam_ser_fork(void *pthreadparam)
{
  struct s_thread_param *pparam = (struct s_thread_param *) pthreadparam;
  struct s_client *cl=create_client(get_null_ip());
  pthread_setspecific(getclient, cl);
  cl->thread=pthread_self();
  cl->typ='c';
  cl->module_idx = pparam->module_idx;
  cl->account=first_client->account;

  if (!cl->serialdata && !cs_malloc(&cl->serialdata, sizeof(struct s_serial_client)))
    return NULL;

  set_thread_name(__func__);
  oscam_init_serialdata(cl->serialdata);
  oscam_copy_serialdata(cl->serialdata, &pparam->serialdata);
  if (cl->serialdata->oscam_ser_port > 0)
  {
    // reader struct for serial network connection
    struct s_reader *newrdr;
    if (!cs_malloc(&newrdr, sizeof(struct s_reader)))
      return NULL;
    memset(newrdr, 0, sizeof(struct s_reader));
    newrdr->client = cl;
    newrdr->ph = *serial_ph;
    cl->reader = newrdr;
    cs_strncpy(cl->reader->label, "network-socket", sizeof(cl->reader->label));
  }
  cs_log("serial: initialized (%s@%s)", cl->serialdata->oscam_ser_proto>P_MAX ?
         "auto" : proto_txt[cl->serialdata->oscam_ser_proto], cl->serialdata->oscam_ser_device);

  pthread_mutex_lock(&mutex);
  bcopy_end = 1;
  pthread_mutex_unlock(&mutex);
  pthread_cond_signal(&cond);

  while(1)
  {
    cl->login=time((time_t *)0);
    cl->pfd=init_oscam_ser_device(cl);
    if (cl->pfd)
      oscam_ser_server();
    else
      cs_sleepms(60000);	// retry in 1 min. (USB-Device ?)
    if (cl->pfd) close(cl->pfd);
  }
  NULLFREE(cl->serialdata);
  NULLFREE(cl->reader);
  return NULL;
}
Example #13
int graylog_forwarder_run_controller_loop(zconfig_t* config, zlist_t* devices, zlist_t *subscriptions, int rcv_hwm, int send_hwm)
{
    set_thread_name("controller");

    zsys_init();

    controller_state_t state = {.config = config};
    bool start_up_complete = controller_create_actors(&state, devices, subscriptions, rcv_hwm, send_hwm);

    if (!start_up_complete)
        goto exit;

    // set up event loop
    zloop_t *loop = zloop_new();
    assert(loop);
    zloop_set_verbose(loop, 0);

    // send tick commands every second
    int rc = zloop_timer(loop, 1000, 1, send_tick_commands, &state);
    assert(rc != -1);

    // run the loop
    // when running under the google profiler, zmq_poll terminates with EINTR
    // so we keep the loop running in this case
    if (!zsys_interrupted) {
        bool should_continue_to_run = getenv("CPUPROFILE") != NULL;
        do {
            rc = zloop_start(loop);
            should_continue_to_run &= errno == EINTR && !zsys_interrupted;
            log_zmq_error(rc, __FILE__, __LINE__);
        } while (should_continue_to_run);
    }
    printf("[I] controller: shutting down\n");

    // shutdown
    zloop_destroy(&loop);
    assert(loop == NULL);

 exit:
    printf("[I] controller: destroying actor threads\n");
    controller_destroy_actors(&state);
    printf("[I] controller: calling zsys_shutdown\n");
    zsys_shutdown();

    printf("[I] controller: terminated\n");
    return 0;
}
Example #14
void WorkerThread::run()
{
    set_thread_name();

    while (!m_abort_switch.is_aborted())
    {
        if (m_pause_flag.is_set())
        {
            // Wait until the resume event.
            boost::mutex::scoped_lock lock(m_pause_mutex);
            while (m_pause_flag.is_set())
                m_pause_event.wait(lock);
        }

        // Acquire a job.
        const JobQueue::RunningJobInfo running_job_info =
            m_job_queue.wait_for_scheduled_job(m_abort_switch);

        // Handle the case where the job queue is empty.
        if (running_job_info.first.m_job == nullptr)
        {
            if (m_flags & JobManager::KeepRunningOnEmptyQueue)
            {
                // Keep the thread running and waiting for new jobs.
                continue;
            }
            else
            {
                // Terminate the thread.
                break;
            }
        }

        // Execute the job.
        const bool success = execute_job(*running_job_info.first.m_job);

        // Retire the job.
        m_job_queue.retire_running_job(running_job_info);

        // Handle job execution failures.
        if (!success && !(m_flags & JobManager::KeepRunningOnJobFailure))
        {
            m_job_queue.clear_scheduled_jobs();
            break;
        }
    }
}
Example #15
static
void graylog_forwarder_subscriber(zsock_t *pipe, void *args)
{
    set_thread_name("graylog-forwarder-subscriber");

    int rc;
    subscriber_state_t *state = subscriber_state_new(pipe, args);

    // signal readiness after sockets have been created
    zsock_signal(pipe, 0);

    // set up event loop
    zloop_t *loop = zloop_new();
    assert(loop);
    zloop_set_verbose(loop, 0);
    // we rely on the controller shutting us down
    zloop_ignore_interrupts(loop);

    // setup handler for actor messages
    rc = zloop_reader(loop, state->pipe, actor_command, state);
    assert(rc == 0);

    // setup handler for the sub socket
    rc = zloop_reader(loop, state->sub_socket, read_request_and_forward, state);
    assert(rc == 0);

    // run the loop
    fprintf(stdout, "[I] subscriber: listening\n");

    bool should_continue_to_run = getenv("CPUPROFILE") != NULL;
    do {
        rc = zloop_start(loop);
        should_continue_to_run &= errno == EINTR;
        if (!zsys_interrupted)
            log_zmq_error(rc, __FILE__, __LINE__);
    } while (should_continue_to_run);

    fprintf(stdout, "[I] subscriber: shutting down\n");

    // shutdown
    subscriber_state_destroy(&state);
    zloop_destroy(&loop);
    assert(loop == NULL);

    fprintf(stdout, "[I] subscriber: terminated\n");
}
Example #16
void rabbitmq_listener(zsock_t *pipe, void* args)
{
    set_thread_name("rabbit-consumer");

    // signal readiness immediately so that zmq publishers are already processed
    // while the rabbitmq exchanges/queues/bindings are created
    zsock_signal(pipe, 0);

    amqp_connection_state_t conn = setup_amqp_connection();
    int last_channel = rabbitmq_setup_queues(conn, (zlist_t*)args);

    // connect to the receiver socket
    zsock_t *receiver = zsock_new(ZMQ_PUSH);
    zsock_set_sndhwm(receiver, 10000);
    zsock_connect(receiver, "inproc://receiver");

    // set up event loop
    zloop_t *loop = zloop_new();
    assert(loop);
    zloop_set_verbose(loop, 0);
    zloop_ignore_interrupts(loop);

    // register actor command handler
    int rc = zloop_reader(loop, pipe, pipe_command, NULL);
    assert(rc==0);

    // register rabbitmq socket for pollin events
    zmq_pollitem_t rabbit_item = {
        .fd = amqp_get_sockfd(conn),
        .events = ZMQ_POLLIN
    };
    rabbit_listener_state_t listener_state = {
        .conn = conn,
        .receiver = zsock_resolve(receiver)
    };
    rc = zloop_poller(loop, &rabbit_item, rabbitmq_consume_message_and_forward, &listener_state);
    assert(rc==0);

    // start event loop
    zloop_start(loop);

    // shutdown
    zloop_destroy(&loop);
    zsock_destroy(&receiver);
    shutdown_amqp_connection(conn, 0, last_channel);
}
Example #17
static void *doomer_thread_(void unused_ *dummy)
{
    set_thread_name("J-doomer");
    int old_state;
    if (0 != pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, &old_state)) {
        SLOG(LOG_CRIT, "Cannot set cancelstate of Doomer-thread");
    }
    assert(old_state == PTHREAD_CANCEL_ENABLE);
    if (0 != pthread_setcanceltype(PTHREAD_CANCEL_DEFERRED, &old_state)) {
        SLOG(LOG_CRIT, "Cannot set canceltype of Doomer-thread");
    }
    assert(old_state == PTHREAD_CANCEL_DEFERRED);
    while (1) {
        doomer_run();
        sleep(1);
    }
    return NULL;
}
Example #18
void watchdog(zsock_t *pipe, void *args)
{
    set_thread_name("watchdog[0]");

    int rc;
    watchdog_state_t state = { .credit = CREDIT, .received_term_cmd = false };

    // signal readiness
    zsock_signal(pipe, 0);

    // set up event loop
    zloop_t *loop = zloop_new();
    assert(loop);
    zloop_set_verbose(loop, 0);
    // we rely on the controller shutting us down
    zloop_ignore_interrupts(loop);

    // decrease credit every second
    rc = zloop_timer(loop, 1000, 0, timer_event, &state);
    assert(rc != -1);

    // setup handler for actor messages
    rc = zloop_reader(loop, pipe, actor_command, &state);
    assert(rc == 0);

    // run the loop
    bool should_continue_to_run = getenv("CPUPROFILE") != NULL;
    do {
        rc = zloop_start(loop);
        should_continue_to_run &= errno == EINTR;
        if (!state.received_term_cmd)
            log_zmq_error(rc, __FILE__, __LINE__);
    } while (should_continue_to_run);

    if (!quiet)
        printf("[I] watchdog[0]: shutting down\n");

    // shutdown
    zloop_destroy(&loop);
    assert(loop == NULL);

    if (!quiet)
        printf("[I] watchdog[0]: terminated\n");
}
Example #19
void *component_worker::component_worker_thread_main(component_worker *arg)
{
	std::string thname;

	// set the thread name
	thname = "omx:wrk:";
	thname += arg->get_name();
	set_thread_name(thname.c_str());

	try {
		// notify that the main processing has started
		arg->set_running(true);
		arg->set_request_restart(true);

		while (arg->is_running()) {
			arg->wait_request_restart();
			if (!arg->is_running()) {
				break;
			}
			arg->set_restart_done(true);

			try {
				arg->run();
			} catch (const mf::interrupted_error& e) {
				infoprint("interrupted: worker %s: %s\n",
					arg->get_name(), e.what());
			} catch (const std::runtime_error& e) {
				errprint("runtime_error: worker %s: %s\n",
					arg->get_name(), e.what());
			}
			arg->set_request_flush(false);
			arg->set_flush_done(true);
		}
	} catch (const mf::interrupted_error& e) {
		infoprint("interrupted: worker %s: %s\n",
			arg->get_name(), e.what());
	} catch (const std::runtime_error& e) {
		errprint("runtime_error: worker %s: %s\n",
			arg->get_name(), e.what());
	}

	return nullptr;
}
Example #20
static
void parser(zsock_t *pipe, void *args)
{
    parser_state_t *state = (parser_state_t*)args;
    state->pipe = pipe;
    set_thread_name(state->me);
    size_t id = state->id;

    // signal readiness after sockets have been created
    zsock_signal(pipe, 0);

    zpoller_t *poller = zpoller_new(state->pipe, state->pull_socket, NULL);
    assert(poller);

    while (!zsys_interrupted) {
        // -1 == block until something is readable
        void *socket = zpoller_wait(poller, -1);
        zmsg_t *msg = NULL;
        if (socket == state->pipe) {
            msg = zmsg_recv(state->pipe);
            char *cmd = zmsg_popstr(msg);
            zmsg_destroy(&msg);
            if (streq(cmd, "$TERM")) {
                fprintf(stderr, "[D] parser [%zu]: received $TERM command\n", id);
                free(cmd);
                break;
            } else {
                fprintf(stderr, "[E] parser [%zu]: received unknown command: %s\n", id, cmd);
                free(cmd);
                assert(false);
            }
        } else if (socket == state->pull_socket) {
            process_logjam_message(state);
        } else {
            // socket == NULL, probably interrupted by signal handler
            break;
        }
    }

    printf("[I] parser [%zu]: shutting down\n", id);
    parser_state_destroy(&state);
    printf("[I] parser [%zu]: terminated\n", id);
}
Example #21
/// @todo specified exception should be caught.
void ScheduledMessageService::run() {
  DVLOG(2) << "ScheduledMessageService is Running";
  set_thread_name("idgs_sched");

  while (m_is_started.load() == ACTIVE) {
    unsigned long now = sys::getCurrentTime();
    try {
      //DVLOG(2) << "current time is " << now;
      MapType::iterator it;
      for (it = tasks.begin(); it != tasks.end(); ++it) {
        fire(now, it->second);
      }
    } catch (std::exception &e) {
      LOG(ERROR)<< e.what();
    } catch(...) {
      catchUnknownException();
    }

    std::this_thread::sleep_for(dura);
  }
  DVLOG(2) << "ScheduledMessageService is out of running";
}
Example #22
static void *arm_led_thread_main(void *UNUSED(thread_data))
{
	uint8_t running = 1;
	set_thread_name(__func__);
	while(running)
	{
		LL_ITER iter = ll_iter_create(arm_led_actions);
		struct s_arm_led *arm_led;
		while((arm_led = ll_iter_next(&iter)))
		{
			int32_t led, action;
			time_t now, start;
			led = arm_led->led;
			action = arm_led->action;
			now = time((time_t *)0);
			start = arm_led->start_time;
			ll_iter_remove_data(&iter);
			if(action == LED_STOP_THREAD)
			{
				running = 0;
				break;
			}
			if(now - start < ARM_LED_TIMEOUT)
			{
				arm_switch_led_from_thread(led, action);
			}
		}
		if(running)
		{
			sleep(60);
		}
	}
	ll_clear_data(arm_led_actions);
	pthread_exit(NULL);
	return NULL;
}
Example #23
void FileScanner::Run(int thread_index)
{
	// Set the name of the thread.
	std::stringstream temp_ss;
	temp_ss << "FILESCAN_";
	temp_ss << thread_index;
	set_thread_name(temp_ss.str());

	if(m_manually_assign_cores)
	{
		// Spread the scanner threads across cores.  Linux at least doesn't seem to want to do that by default.
		AssignToNextCore();
	}

	// Create a reusable, resizable buffer for the File() reads.
	auto file_data_storage = std::make_shared<ResizableArray<char>>();

	// Pull new filenames off the input queue until it's closed.
	std::string next_string;
	while(m_in_queue.wait_pull(std::move(next_string)) != queue_op_status::closed)
	{
		MatchList ml(next_string);

		try
		{
			// Try to open and read the file.  This could throw.
			LOG(INFO) << "Attempting to scan file \'" << next_string << "\'";
			File f(next_string, file_data_storage);

			if(f.size() == 0)
			{
				LOG(INFO) << "WARNING: Filesize of \'" << next_string << "\' is 0, skipping.";
				continue;
			}

			const char *file_data = f.data();
			size_t file_size = f.size();

			// Scan the file data for occurrences of the regex, sending matches to the MatchList ml.
			ScanFile(file_data, file_size, ml);

			if(!ml.empty())
			{
				// Force move semantics here.
				m_output_queue.wait_push(std::move(ml));
			}
		}
		catch(const FileException &error)
		{
			// The File constructor threw an exception.
			ERROR() << error.what();
		}
		catch(const std::system_error& error)
		{
			// A system error.  Currently should only be errors from File.
			ERROR() << error.code() << " - " << error.code().message();
		}
		catch(...)
		{
			// Rethrow whatever it was.
			throw;
		}
	}
}
Example #24
void Audio_Stream_Player::run()
{
#ifdef DEBUG_OUTPUT
    set_thread_name("Audio_Stream_Player::run");
#endif
    //	my_out << "Audio_Stream_Player run() started, sleeping " << (owner->getMaxDelay_ms() + intraWakePeriod_ms) / 2 << "ms" << std::endl;
    my_sleep(_owner->get_max_delay_ms() / 2);
    _run_wake_up_time_us = utime();
    while(!_quit)
    {
        int delay_us = utime() - _time_of_last_enqueue_us;
        _queue_mutex.lock();
        if(!queue.empty())
            delay_us += queue.back().timestamp_us - queue.front().timestamp_us;
        //my_out << "Audio_Stream_Player run() locking the mutex" << std::endl;
        while(1)
        {
            bool queueEmpty = queue.empty();
            if(queueEmpty)
            {
                //my_out << "Audio_Stream_Player run() unlocking the mutex due to empty queue" << std::endl;
                _queue_mutex.unlock();
            }
            while(queue.empty())
            {
                if(_quit)
                    return;
                if(_buffer_underflow_count++ % (1000000 / BUFFER_OVERFLOW_UNDERFLOW_MODIFIER_us) == 0)
                    my_dbg << "Audio_Stream_Player buffer underflow, moving the wake timer forward" << std::endl;

                _run_wake_up_time_us += BUFFER_OVERFLOW_UNDERFLOW_MODIFIER_us;
                my_sleep(BUFFER_OVERFLOW_UNDERFLOW_MODIFIER_us / 1000);
            }

            _buffer_underflow_count = 0;
            if(queueEmpty)
            {
                //my_out << "Audio_Stream_Player run() relocking the mutex due to empty queue" << std::endl;
                _queue_mutex.lock();
            }
            Audio_Stream_Player_Queue_Element &first = queue.front();
            if(first.timestamp_us > _playback_time_us)
            {
                //my_out << "Audio_Stream_Player run() breaking" << std::endl;
                break;
            }
            my_dbg << "Audio_Stream_Player popped frame " << (uint64_t)(first.timestamp_us / 1000) << "ms" << std::endl;
            _sink->push_sound(first.buffer, first.nSamples, first.seqNo, first.sfreq, first.stereo);
            delete[] first.buffer;
            queue.pop_front();
        }
        //my_out << "Audio_Stream_Player run() unlocking the mutex" << std::endl;
        _queue_mutex.unlock();
        _run_wake_up_time_us += _intra_wake_period_ms * 1000;
        bool bufferOverflow = (delay_us >= 1000 * _owner->get_max_delay_ms());
        if(bufferOverflow)
        {
            _playback_time_us += BUFFER_OVERFLOW_UNDERFLOW_MODIFIER_us;
            //merr << "Audio_Stream_Player buffer overflow, moving the playback timer forward by " << BUFFER_OVERFLOW_UNDERFLOW_MODIFIER_us / 1000 << "ms" << std::endl;
        }
        if(_ordered_delay_us >= 0)
        {
            int delayDiff_us = _ordered_delay_us - delay_us;
            if(abs(delayDiff_us) > SYNCHRONIZATION_TOLERATION_us)
            {
                _playback_time_us -= delayDiff_us;
                my_dbg << "Audio_Stream_Player applying ordered delay, modifying the playback timer by " << -delayDiff_us / 1000 << "ms" << std::endl;
            }
            _ordered_delay_us = -1;
        }
        _playback_time_us += _intra_wake_period_ms * 1000;
        int sleepTime_ms = (_run_wake_up_time_us - int64_t(utime())) / 1000;
        //my_dbg << "Audio_Stream_Player run() sleeping " << _intra_wake_period_ms << "ms" << std::endl;
        if(sleepTime_ms > 0)
            my_sleep(sleepTime_ms);
    }
}
Example #25
void *
sge_worker_main(void *arg)
{
   bool do_endlessly = true;
   cl_thread_settings_t *thread_config = (cl_thread_settings_t*)arg;
   sge_gdi_ctx_class_t *ctx = NULL;
   monitoring_t monitor;
   monitoring_t *monitorp = &monitor;
   time_t next_prof_output = 0;

   DENTER(TOP_LAYER, "sge_worker_main");

   DPRINTF(("started"));
   cl_thread_func_startup(thread_config);
   sge_monitor_init(&monitor, thread_config->thread_name, GDI_EXT, MT_WARNING, MT_ERROR);
   sge_qmaster_thread_init(&ctx, QMASTER, WORKER_THREAD, true);

   /* register at profiling module */
   set_thread_name(pthread_self(), "Worker Thread");
   conf_update_thread_profiling("Worker Thread");
 
   while (do_endlessly) {
      sge_gdi_packet_class_t *packet = NULL;

      /*
       * Wait for packets. As long as packets are available cancelation 
       * of this thread is ignored. The shutdown procedure in the main 
       * thread takes care that packet producers will be terminated 
       * before all worker threads so that this won't be a problem.
       */
      MONITOR_IDLE_TIME(
         sge_tq_wait_for_task(Master_Task_Queue, 1, SGE_TQ_GDI_PACKET, (void *)&packet),
         &monitor, mconf_get_monitor_time(), mconf_is_monitor_message());

      MONITOR_SET_QLEN((monitorp), sge_tq_get_task_count(Master_Task_Queue));

      if (packet != NULL) {
         sge_gdi_task_class_t *task = packet->first_task;
         bool is_only_read_request = true;

         thread_start_stop_profiling();

#ifdef SEND_ANSWER_IN_LISTENER
#else
         /*
          * prepare buffer for sending an answer 
          */
         if (packet->is_intern_request == false && packet->is_gdi_request == true) {
            init_packbuffer(&(packet->pb), 0, 0);
         }
#endif

         MONITOR_MESSAGES((monitorp));

         if (packet->is_gdi_request == true) {
            /*
             * test if a write lock is necessary
             */
            task = packet->first_task;
            while (task != NULL) {
               u_long32 command = SGE_GDI_GET_OPERATION(task->command); 

               if (command != SGE_GDI_GET) {
                  is_only_read_request = false;
                  break;
               }
               task = task->next;            
            }
         } else {
            is_only_read_request = false;
         }

         /*
          * acquire the correct lock
          */
         if (is_only_read_request) {
            MONITOR_WAIT_TIME(SGE_LOCK(LOCK_GLOBAL, LOCK_READ), monitorp);
         } else {
            MONITOR_WAIT_TIME(SGE_LOCK(LOCK_GLOBAL, LOCK_WRITE), monitorp);
         }

         if (packet->is_gdi_request == true) {
            /*
             * do the GDI request
             */
            task = packet->first_task;
            while (task != NULL) {
               sge_c_gdi(ctx, packet, task, &(task->answer_list), &monitor);

               task = task->next;
            }
         } else {
            task = packet->first_task;
            sge_c_report(ctx, packet->host, packet->commproc, packet->commproc_id, 
                         task->data_list, &monitor);
         }

         /*
          * do unlock
          */
         if (is_only_read_request) {
            SGE_UNLOCK(LOCK_GLOBAL, LOCK_READ)
         } else {
            SGE_UNLOCK(LOCK_GLOBAL, LOCK_WRITE)
         }

         if (packet->is_gdi_request == true) {
#ifdef SEND_ANSWER_IN_LISTENER
            sge_gdi_packet_broadcast_that_handled(packet);
#else
            /*
             * Send the answer to the client
             */
            if (packet->is_intern_request == false) {
               MONITOR_MESSAGES_OUT(monitorp);
               sge_gdi2_send_any_request(ctx, 0, NULL,
                                         packet->host, packet->commproc, packet->commproc_id, 
                                         &(packet->pb), TAG_GDI_REQUEST, 
                                         packet->response_id, NULL);
               clear_packbuffer(&(packet->pb));
#  ifdef BLOCK_LISTENER
               sge_gdi_packet_broadcast_that_handled(packet);
#  else
               sge_gdi_packet_free(&packet);
#  endif
               /*
                * Code only for TS: 
                *
                * Following if-block will only be executed in testsuite if the qmaster
                * parameter __TEST_SLEEP_AFTER_REQUEST is defined. This will block the
                * worker thread if it handled a request. Only this makes sure that
                * other worker threads can handle incoming requests. Otherwise
                * it might be possible that one worker thread handles all requests
                * on fast qmaster hosts if testsuite is not fast enough to generate
                * gdi requests.
                */
               if (mconf_get_enable_test_sleep_after_request() == true) {
                  sleep(5);
               }
            } else {
               sge_gdi_packet_broadcast_that_handled(packet);
               /* this is an internal request, packet will get destroyed later,
                * where the caller waits for the answer
                * make sure it is no longer accessed here
                */
               packet = NULL;
            }
#endif
         } else {
            sge_gdi_packet_free(&packet);
         }
     
         thread_output_profiling("worker thread profiling summary:\n",
                                 &next_prof_output);

         sge_monitor_output(&monitor);
      } else { 
Example #26
static void *chkcache_process(void)
{
    set_thread_name(__func__);

    time_t timeout;
    struct ecm_request_t *er, *ecm;
#ifdef CS_CACHEEX
    uint8_t add_hitcache_er;
    struct s_reader *cl_rdr;
    struct s_reader *rdr;
    struct s_ecm_answer *ea;
    struct s_client *cex_src=NULL;
#endif
    struct s_write_from_cache *wfc=NULL;

    while(1)
    {
        cs_readlock(&ecmcache_lock);
        for(er = ecmcwcache; er; er = er->next)
        {
            timeout = time(NULL)-((cfg.ctimeout+500)/1000+1);
            if(er->tps.time < timeout)
            {
                break;
            }

            if(er->rc<E_UNHANDLED || er->readers_timeout_check)  //already answered
            {
                continue;
            }

            //********  CHECK IF FOUND ECM IN CACHE
            ecm = check_cache(er, er->client);
            if(ecm)     //found in cache
            {

#ifdef CS_CACHEEX
                //check for add_hitcache
                if(ecm->cacheex_src)   //cw from cacheex
                {
                    if((er->cacheex_wait_time && !er->cacheex_wait_time_expired) || !er->cacheex_wait_time)   //only when no wait_time expires (or not wait_time)
                    {

                        //add_hitcache already called, but we check if we have to call it for these (er) caid|prid|srvid
                        if(ecm->prid!=er->prid || ecm->srvid!=er->srvid)
                        {
                            cex_src = ecm->cacheex_src && is_valid_client(ecm->cacheex_src) && !ecm->cacheex_src->kill ?  ecm->cacheex_src : NULL; //here we should be sure cex client has not been freed!
                            if(cex_src) { //add_hitcache only if client is really active
                                add_hitcache_er=1;
                                cl_rdr = cex_src->reader;
                                if(cl_rdr && cl_rdr->cacheex.mode == 2)
                                {
                                    for(ea = er->matching_rdr; ea; ea = ea->next)
                                    {
                                        rdr = ea->reader;
                                        if(cl_rdr == rdr && ((ea->status & REQUEST_ANSWERED) == REQUEST_ANSWERED))
                                        {
                                            cs_debug_mask(D_CACHEEX|D_CSP|D_LB,"{client %s, caid %04X, prid %06X, srvid %04X} [CACHEEX] skip ADD self request!", (check_client(er->client)?er->client->account->usr:"******"),er->caid, er->prid, er->srvid);
                                            add_hitcache_er=0; //don't add hit cache, reader requested self
                                        }
                                    }
                                }

                                if(add_hitcache_er)
                                {
                                    add_hitcache(cex_src, er);    //USE cacheex client (to get correct group) and ecm from requesting client (to get correct caid|prid|srvid)!!!
                                }
                            }
                        }

                    }
                    else
                    {
                        //add_hitcache already called, but we have to remove it because cacheex not coming before wait_time
                        if(ecm->prid==er->prid && ecm->srvid==er->srvid)
                        {
                            del_hitcache(ecm);
                        }
                    }
                }
                //END check for add_hitcache
#endif

                if(check_client(er->client))
                {

                    wfc=NULL;
                    if(!cs_malloc(&wfc, sizeof(struct s_write_from_cache)))
                    {
                        NULLFREE(ecm);
                        continue;
                    }

                    wfc->er_new=er;
                    wfc->er_cache=ecm;

                    if(!add_job(er->client, ACTION_ECM_ANSWER_CACHE, wfc, sizeof(struct s_write_from_cache)))   //write_ecm_answer_fromcache
                    {
                        NULLFREE(ecm);
                        continue;
                    }
                }
                else
                {
                    NULLFREE(ecm);
                }
            }
        }
        cs_readunlock(&ecmcache_lock);

        cs_sleepms(10);
    }

    return NULL;
}
Example #27
static SCM g_set_thread_name(SCM name_)
{
    char *name = scm_to_tempstr(name_);
    set_thread_name(name);
    return SCM_UNSPECIFIED;
}
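The binding above simply forwards a Scheme string to set_thread_name(). None of the examples show that helper itself; as a rough sketch (not any of these projects' actual implementation), a Linux-only version could wrap pthread_setname_np(), which accepts at most 15 characters plus the terminating NUL:

#define _GNU_SOURCE /* for pthread_setname_np() on glibc */
#include <pthread.h>
#include <string.h>

static void set_thread_name(const char *name)
{
#if defined(__linux__)
    char buf[16]; /* kernel limit: 15 characters + NUL */
    strncpy(buf, name, sizeof(buf) - 1);
    buf[sizeof(buf) - 1] = '\0';
    pthread_setname_np(pthread_self(), buf);
#else
    (void)name; /* no-op where no portable API is assumed */
#endif
}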
Example #28
/****** qmaster/threads/sge_scheduler_main() **********************************
*  NAME
*     sge_scheduler_main() -- main function of the scheduler thread 
*
*  SYNOPSIS
*     void * sge_scheduler_main(void *arg) 
*
*  FUNCTION
*     Main function of the scheduler thread, 
*
*  INPUTS
*     void *arg - pointer to the thread function (type cl_thread_settings_t*) 
*
*  RESULT
*     void * - always NULL 
*
*  NOTES
*     MT-NOTE: sge_scheduler_main() is MT safe 
*
*     MT-NOTE: this is a thread function. Do NOT use this function
*     MT-NOTE: in any other way!
*
*  SEE ALSO
*     qmaster/threads/sge_scheduler_initialize() 
*     qmaster/threads/sge_scheduler_cleanup_thread() 
*     qmaster/threads/sge_scheduler_terminate() 
*     qmaster/threads/sge_scheduler_main() 
*******************************************************************************/
void *
sge_scheduler_main(void *arg)
{
   time_t next_prof_output = 0;
   monitoring_t monitor;
   sge_gdi_ctx_class_t *ctx = NULL;
   sge_evc_class_t *evc = NULL;
   lList *alp = NULL;
   sge_where_what_t where_what;
   cl_thread_settings_t *thread_config = (cl_thread_settings_t*)arg;
   bool do_shutdown = false;
   bool do_endlessly = true;
   bool local_ret = true;

   DENTER(TOP_LAYER, "sge_scheduler_main");

   memset(&where_what, 0, sizeof(where_what));

   /*
    * startup
    */
   if (local_ret) {
      /* initialize commlib thread */
      cl_thread_func_startup(thread_config);

      /* initialize monitoring */
      sge_monitor_init(&monitor, thread_config->thread_name, SCH_EXT, SCT_WARNING, SCT_ERROR);
      sge_qmaster_thread_init(&ctx, SCHEDD, SCHEDD_THREAD, true);

      /* register at profiling module */
      set_thread_name(pthread_self(), "Scheduler Thread");
      conf_update_thread_profiling("Scheduler Thread");
      DPRINTF((SFN" started\n", thread_config->thread_name));

      /* initialize schedd_runnlog logging */
      schedd_set_schedd_log_file(ctx);
   }

   /* set profiling parameters */
   prof_set_level_name(SGE_PROF_EVENTMASTER, NULL, NULL);
   prof_set_level_name(SGE_PROF_SPOOLING, NULL, NULL);
   prof_set_level_name(SGE_PROF_CUSTOM0, "scheduler", NULL);
   prof_set_level_name(SGE_PROF_CUSTOM1, "pending ticket calculation", NULL);
   prof_set_level_name(SGE_PROF_CUSTOM3, "job sorting", NULL);
   prof_set_level_name(SGE_PROF_CUSTOM4, "job dispatching", NULL);
   prof_set_level_name(SGE_PROF_CUSTOM5, "send orders", NULL);
   prof_set_level_name(SGE_PROF_CUSTOM6, "scheduler event loop", NULL);
   prof_set_level_name(SGE_PROF_CUSTOM7, "copy lists", NULL);
   prof_set_level_name(SGE_PROF_SCHEDLIB4, NULL, NULL);

   /* set-up needed for 'schedule' file */
   serf_init(schedd_serf_record_func, schedd_serf_newline);
   schedd_set_serf_log_file(ctx);

   /*
    * prepare event client/mirror mechanism
    */
   if (local_ret) {
      local_ret = sge_gdi2_evc_setup(&evc, ctx, EV_ID_SCHEDD, &alp, "scheduler");
      DPRINTF(("prepared event client/mirror mechanism\n"));
   }

   /*
    * register as event mirror
    */
   if (local_ret) {
      sge_mirror_initialize(evc, EV_ID_SCHEDD, "scheduler",
                            false, &event_update_func, &sge_mod_event_client,
                            &sge_add_event_client, &sge_remove_event_client,
                            &sge_handle_event_ack);
      evc->ec_register(evc, false, NULL, &monitor);
      evc->ec_set_busy_handling(evc, EV_BUSY_UNTIL_RELEASED);
      DPRINTF(("registered at event mirror\n"));
   }

   /*
    * subscribe necessary data
    */
   if (local_ret) {
      ensure_valid_what_and_where(&where_what);
      subscribe_scheduler(evc, &where_what);
      DPRINTF(("subscribed necessary data from event master\n"));
   }

   /* 
    * schedulers main loop
    */
   if (local_ret) {
      while (do_endlessly) {
         bool handled_events = false;
         lList *event_list = NULL;
         int execute = 0;
         double prof_copy = 0.0;
         double prof_total = 0.0;
         double prof_init = 0.0;
         double prof_free = 0.0;
         double prof_run = 0.0;
         lList *orders = NULL;

         if (sconf_get_profiling()) {
            prof_start(SGE_PROF_OTHER, NULL);
            prof_start(SGE_PROF_PACKING, NULL);
            prof_start(SGE_PROF_EVENTCLIENT, NULL);
            prof_start(SGE_PROF_MIRROR, NULL);
            prof_start(SGE_PROF_GDI, NULL);
            prof_start(SGE_PROF_HT_RESIZE, NULL);
            prof_start(SGE_PROF_CUSTOM0, NULL);
            prof_start(SGE_PROF_CUSTOM1, NULL);
            prof_start(SGE_PROF_CUSTOM3, NULL);
            prof_start(SGE_PROF_CUSTOM4, NULL);
            prof_start(SGE_PROF_CUSTOM5, NULL);
            prof_start(SGE_PROF_CUSTOM6, NULL);
            prof_start(SGE_PROF_CUSTOM7, NULL);
            prof_start(SGE_PROF_SCHEDLIB4, NULL);
         } else {
            prof_stop(SGE_PROF_OTHER, NULL);
            prof_stop(SGE_PROF_PACKING, NULL);
            prof_stop(SGE_PROF_EVENTCLIENT, NULL);
            prof_stop(SGE_PROF_MIRROR, NULL);
            prof_stop(SGE_PROF_GDI, NULL);
            prof_stop(SGE_PROF_HT_RESIZE, NULL);
            prof_stop(SGE_PROF_CUSTOM0, NULL);
            prof_stop(SGE_PROF_CUSTOM1, NULL);
            prof_stop(SGE_PROF_CUSTOM3, NULL);
            prof_stop(SGE_PROF_CUSTOM4, NULL);
            prof_stop(SGE_PROF_CUSTOM5, NULL);
            prof_stop(SGE_PROF_CUSTOM6, NULL);
            prof_stop(SGE_PROF_CUSTOM7, NULL);
            prof_stop(SGE_PROF_SCHEDLIB4, NULL);
         }

         /*
          * Wait for new events
          */
         MONITOR_IDLE_TIME(sge_scheduler_wait_for_event(evc, &event_list), (&monitor), mconf_get_monitor_time(), 
                           mconf_is_monitor_message());

         /* If we lost connection we have to register again */
         if (evc->ec_need_new_registration(evc)) {
            lFreeList(&event_list);
            if (evc->ec_register(evc, false, NULL, &monitor) == true) {
               DPRINTF(("re-registered at event master!\n"));
            }
         }

         if (event_list != NULL) {
            /* check for shutdown */
            do_shutdown = (lGetElemUlong(event_list, ET_type, sgeE_SHUTDOWN) != NULL) ? true : false;

            /* update mirror and free data */
            if (do_shutdown == false && sge_mirror_process_event_list(evc, event_list) == SGE_EM_OK) {
               handled_events = true;
               DPRINTF(("events handled\n"));
            } else {
               DPRINTF(("events contain shutdown event - ignoring events\n"));
            }
            lFreeList(&event_list);
         }
 
         /* if we actually got events, start the scheduling run and further event processing */
         if (handled_events == true) {
            lList *answer_list = NULL;
            scheduler_all_data_t copy;
            lList *master_cqueue_list = *(object_type_get_master_list(SGE_TYPE_CQUEUE));
            lList *master_job_list = *object_type_get_master_list(SGE_TYPE_JOB);
            lList *master_userset_list = *object_type_get_master_list(SGE_TYPE_USERSET);
            lList *master_project_list = *object_type_get_master_list(SGE_TYPE_PROJECT);
            lList *master_exechost_list= *object_type_get_master_list(SGE_TYPE_EXECHOST);
            lList *master_rqs_list= *object_type_get_master_list(SGE_TYPE_RQS);
            lList *master_centry_list = *object_type_get_master_list(SGE_TYPE_CENTRY);
            lList *master_ckpt_list = *object_type_get_master_list(SGE_TYPE_CKPT);
            lList *master_user_list = *object_type_get_master_list(SGE_TYPE_USER);
            lList *master_ar_list = *object_type_get_master_list(SGE_TYPE_AR);
            lList *master_pe_list = *object_type_get_master_list(SGE_TYPE_PE);
            lList *master_hgrp_list = *object_type_get_master_list(SGE_TYPE_HGROUP);
            lList *master_sharetree_list = *object_type_get_master_list(SGE_TYPE_SHARETREE);

            /* delay scheduling for test purposes, see issue GE-3306 */
            if (SGE_TEST_DELAY_SCHEDULING > 0) {
               sleep(SGE_TEST_DELAY_SCHEDULING);
            }

            PROF_START_MEASUREMENT(SGE_PROF_CUSTOM6);
            PROF_START_MEASUREMENT(SGE_PROF_CUSTOM7);

            if (__CONDITION(INFOPRINT)) {
               dstring ds;
               char buffer[128];

               sge_dstring_init(&ds, buffer, sizeof(buffer));
               DPRINTF(("================[SCHEDULING-EPOCH %s]==================\n",
                        sge_at_time(0, &ds)));
               sge_dstring_free(&ds);
            }

            /*
             * If there were new events then
             * copy/filter data necessary for the scheduler run
             * and run the scheduler method
             */
            memset(&copy, 0, sizeof(copy));

            copy.dept_list = lSelect("", master_userset_list, where_what.where_dept, where_what.what_acldept);
            copy.acl_list = lSelect("", master_userset_list, where_what.where_acl, where_what.what_acldept);

            DPRINTF(("RAW CQ:%d, J:%d, H:%d, C:%d, A:%d, D:%d, P:%d, CKPT:%d,"
                     " US:%d, PR:%d, RQS:%d, AR:%d, S:nd:%d/lf:%d\n",
               lGetNumberOfElem(master_cqueue_list),
               lGetNumberOfElem(master_job_list),
               lGetNumberOfElem(master_exechost_list),
               lGetNumberOfElem(master_centry_list),
               lGetNumberOfElem(copy.acl_list),
               lGetNumberOfElem(copy.dept_list),
               lGetNumberOfElem(master_project_list),
               lGetNumberOfElem(master_ckpt_list),
               lGetNumberOfElem(master_user_list),
               lGetNumberOfElem(master_project_list),
               lGetNumberOfElem(master_rqs_list),
               lGetNumberOfElem(master_ar_list),
               lGetNumberOfNodes(NULL, master_sharetree_list, STN_children),
               lGetNumberOfLeafs(NULL, master_sharetree_list, STN_children)
            ));

            sge_rebuild_job_category(master_job_list, master_userset_list,
                                        master_project_list, master_rqs_list);

            PROF_STOP_MEASUREMENT(SGE_PROF_CUSTOM7);
            prof_init = prof_get_measurement_wallclock(SGE_PROF_CUSTOM7, true, NULL);
            PROF_START_MEASUREMENT(SGE_PROF_CUSTOM7);

            sge_before_dispatch(evc);

            /* prepare data for the scheduler itself */
            copy.host_list = lCopyList("", master_exechost_list);

            /*
             * Within the scheduler we do only need QIs
             */
            {
               lListElem *cqueue = NULL;
               lEnumeration *what_queue3 = NULL;

               for_each(cqueue, master_cqueue_list) {
                  lList *qinstance_list = lGetList(cqueue, CQ_qinstances);
                  lList *t;

                  if (!qinstance_list) {
                     continue;
                  }

                  /* all_queue_list contains all queue instances with state and full queue name only */
                  if (!what_queue3) {
                     what_queue3 = lWhat("%T(%I%I)", lGetListDescr(qinstance_list), QU_full_name, QU_state);
                  }
                  t = lSelect("t", qinstance_list, NULL, what_queue3);
                  if (t) {
                     if (copy.all_queue_list == NULL) {
                        copy.all_queue_list = lCreateList("all", lGetListDescr(t));
                     }
                     lAppendList(copy.all_queue_list, t);
                     lFreeList (&t);
                  }

                  t = lSelect("t", qinstance_list, where_what.where_queue, where_what.what_queue2);
                  if (t) {
                     if (copy.queue_list == NULL) {
                        copy.queue_list = lCreateList("enabled", lGetListDescr(t));
                     }
                     lAppendList(copy.queue_list, t);
                     lFreeList (&t);
                  }

                  t = lSelect("t", qinstance_list, where_what.where_queue2, where_what.what_queue2);
                  if (t) {
                     if (copy.dis_queue_list == NULL) {
                        copy.dis_queue_list = lCreateList("disabled", lGetListDescr(t));
                     }
                     lAppendList(copy.dis_queue_list, t);
                     lFreeList (&t);
                  }
               }
               if (what_queue3) {
                  lFreeWhat(&what_queue3);
               }
            }

            if (sconf_is_job_category_filtering()) {
               copy.job_list = sge_category_job_copy(copy.queue_list, &orders, evc->monitor_next_run);
            } else {
               copy.job_list = lCopyList("", master_job_list);
            }

            /* no need to copy these lists, they are read only used */
            copy.centry_list = master_centry_list;
            copy.ckpt_list = master_ckpt_list;
            copy.hgrp_list = master_hgrp_list;

            /* these lists need to be copied because they are modified during scheduling run */
            copy.share_tree = lCopyList("", master_sharetree_list);
            copy.pe_list = lCopyList("", master_pe_list);
            copy.user_list = lCopyList("", master_user_list);
            copy.project_list = lCopyList("", master_project_list);
            copy.rqs_list = lCopyList("", master_rqs_list);
            copy.ar_list = lCopyList("", master_ar_list);

            /* report number of reduced and raw (in brackets) lists */
            DPRINTF(("Q:%d, AQ:%d J:%d(%d), H:%d(%d), C:%d, A:%d, D:%d, P:%d, CKPT:%d,"
                     " US:%d, PR:%d, RQS:%d, AR:%d, S:nd:%d/lf:%d \n",
               lGetNumberOfElem(copy.queue_list),
               lGetNumberOfElem(copy.all_queue_list),
               lGetNumberOfElem(copy.job_list),
               lGetNumberOfElem(master_job_list),
               lGetNumberOfElem(copy.host_list),
               lGetNumberOfElem(master_exechost_list),
               lGetNumberOfElem(copy.centry_list),
               lGetNumberOfElem(copy.acl_list),
               lGetNumberOfElem(copy.dept_list),
               lGetNumberOfElem(copy.pe_list),
               lGetNumberOfElem(copy.ckpt_list),
               lGetNumberOfElem(copy.user_list),
               lGetNumberOfElem(copy.project_list),
               lGetNumberOfElem(copy.rqs_list),
               lGetNumberOfElem(copy.ar_list),
               lGetNumberOfNodes(NULL, copy.share_tree, STN_children),
               lGetNumberOfLeafs(NULL, copy.share_tree, STN_children)
            ));

            if (getenv("SGE_ND")) {
               printf("Q:%d, AQ:%d J:%d(%d), H:%d(%d), C:%d, A:%d, D:%d, "
                  "P:%d, CKPT:%d, US:%d, PR:%d, RQS:%d, AR:%d, S:nd:%d/lf:%d \n",
                  lGetNumberOfElem(copy.queue_list),
                  lGetNumberOfElem(copy.all_queue_list),
                  lGetNumberOfElem(copy.job_list),
                  lGetNumberOfElem(master_job_list),
                  lGetNumberOfElem(copy.host_list),
                  lGetNumberOfElem(master_exechost_list),
                  lGetNumberOfElem(copy.centry_list),
                  lGetNumberOfElem(copy.acl_list),
                  lGetNumberOfElem(copy.dept_list),
                  lGetNumberOfElem(copy.pe_list),
                  lGetNumberOfElem(copy.ckpt_list),
                  lGetNumberOfElem(copy.user_list),
                  lGetNumberOfElem(copy.project_list),
                  lGetNumberOfElem(copy.rqs_list),
                  lGetNumberOfElem(copy.ar_list),
                  lGetNumberOfNodes(NULL, copy.share_tree, STN_children),
                  lGetNumberOfLeafs(NULL, copy.share_tree, STN_children)
                 );
            } else {
               schedd_log("-------------START-SCHEDULER-RUN-------------", NULL, evc->monitor_next_run);
            }

            PROF_STOP_MEASUREMENT(SGE_PROF_CUSTOM7);
            prof_copy = prof_get_measurement_wallclock(SGE_PROF_CUSTOM7, true, NULL);
            PROF_START_MEASUREMENT(SGE_PROF_CUSTOM7);

            scheduler_method(evc, &answer_list, &copy, &orders);
            answer_list_output(&answer_list);

            PROF_STOP_MEASUREMENT(SGE_PROF_CUSTOM7);
            prof_run = prof_get_measurement_wallclock(SGE_PROF_CUSTOM7, true, NULL);
            PROF_START_MEASUREMENT(SGE_PROF_CUSTOM7);

            /* .. which gets deleted after using */
            lFreeList(&(copy.host_list));
            lFreeList(&(copy.queue_list));
            lFreeList(&(copy.dis_queue_list));
            lFreeList(&(copy.all_queue_list));
            lFreeList(&(copy.job_list));
            lFreeList(&(copy.acl_list));
            lFreeList(&(copy.dept_list));
            lFreeList(&(copy.pe_list));
            lFreeList(&(copy.share_tree));
            lFreeList(&(copy.user_list));
            lFreeList(&(copy.project_list));
            lFreeList(&(copy.rqs_list));
            lFreeList(&(copy.ar_list));

            PROF_STOP_MEASUREMENT(SGE_PROF_CUSTOM7);
            prof_free = prof_get_measurement_wallclock(SGE_PROF_CUSTOM7, true, NULL);

            /* 
             * need to sync with event master thread
             * if schedd configuration changed then settings in evm can be adjusted
             */
            if (sconf_is_new_config()) {
               /* set scheduler interval / event delivery interval */
               u_long32 interval = sconf_get_schedule_interval();
               if (evc->ec_get_edtime(evc) != interval) {
                  evc->ec_set_edtime(evc, interval);
               }

               /* set job / ja_task event flushing */
               set_job_flushing(evc);

               /* no need to ec_commit here - we do it when resetting the busy state */

               /* now we handled the new schedd config - no need to do it twice */
               sconf_reset_new_config();
            }

            /* block till master handled all GDI orders */
            sge_schedd_block_until_orders_processed(evc->get_gdi_ctx(evc), NULL);
            schedd_order_destroy();

            /*
             * Stop profiling for "schedd run total" and the subcategories
             */
            PROF_STOP_MEASUREMENT(SGE_PROF_CUSTOM6);
            prof_total = prof_get_measurement_wallclock(SGE_PROF_CUSTOM6, true, NULL);

            if (prof_is_active(SGE_PROF_CUSTOM6)) {
               PROFILING((SGE_EVENT, "PROF: schedd run took: %.3f s (init: %.3f s, copy: %.3f s, "
                          "run:%.3f, free: %.3f s, jobs: %d, categories: %d/%d)",
                           prof_total, prof_init, prof_copy, prof_run, prof_free,
                           lGetNumberOfElem(*object_type_get_master_list(SGE_TYPE_JOB)), sge_category_count(),
                           sge_cs_category_count() ));
            }
            if (getenv("SGE_ND") != NULL) {
               printf("--------------STOP-SCHEDULER-RUN-------------\n");
            } else {
               schedd_log("--------------STOP-SCHEDULER-RUN-------------", NULL, evc->monitor_next_run);
            }

            thread_output_profiling("scheduler thread profiling summary:\n", &next_prof_output);

            sge_monitor_output(&monitor);
         }

         /* reset the busy state */
         evc->ec_set_busy(evc, 0);
         evc->ec_commit(evc, NULL);

         /* stop logging into schedd_runlog (enabled via -tsm) */
         evc->monitor_next_run = false;

         /*
          * pthread cancelation point
          *
          * sge_scheduler_cleanup_thread() is the last function which should
          * be called so it is pushed first
          */
         pthread_cleanup_push(sge_scheduler_cleanup_thread, (void *) &ctx);
         pthread_cleanup_push((void (*)(void *))sge_scheduler_cleanup_monitor,
                              (void *)&monitor);
         pthread_cleanup_push((void (*)(void *))sge_scheduler_cleanup_event_client,
                              (void *)evc);
         cl_thread_func_testcancel(thread_config);
         pthread_cleanup_pop(execute);
         pthread_cleanup_pop(execute);
         pthread_cleanup_pop(execute);
         DPRINTF(("passed cancelation point\n"));
      }
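The cleanup/cancellation sequence at the end of the loop above is easy to get wrong, so here is a minimal, self-contained sketch of the same idiom using only plain POSIX threads: handlers are pushed in reverse order of the order they should run in, the thread tests for a pending cancellation request, and the handlers are popped with 0 so they only execute if the thread really is cancelled. All names in the sketch are invented for illustration and are not part of the scheduler code.

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static void release_monitor(void *arg)  { (void)arg; printf("monitor released\n"); }
static void shutdown_thread(void *arg)  { (void)arg; printf("thread shut down\n"); }

static void *worker(void *arg)
{
	int old;
	(void)arg;

	/* only the explicit pthread_testcancel() below should act on a cancellation request */
	pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &old);

	while (1) {
		sleep(1);                                /* stands in for one scheduling run */

		/* shutdown_thread() must run last, so it is pushed first */
		pthread_cleanup_push(shutdown_thread, NULL);
		pthread_cleanup_push(release_monitor, NULL);

		pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, &old);
		pthread_testcancel();                    /* cancellation point */
		pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &old);

		pthread_cleanup_pop(0);                  /* 0: keep the handler, do not run it now */
		pthread_cleanup_pop(0);
	}
	return NULL;
}

int main(void)
{
	pthread_t t;
	pthread_create(&t, NULL, worker, NULL);
	sleep(2);
	pthread_cancel(t);   /* the worker exits at its next testcancel; handlers run in LIFO order */
	pthread_join(t, NULL);
	return 0;
}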
Example #29
0
static void refresh_lcd_file(void)
{
	char targetfile[256];
	char temp_file[256];
	char channame[CS_SERVICENAME_SIZE];

	set_thread_name(__func__);

	if(cfg.lcd_output_path == NULL)
	{
		get_tmp_dir_filename(targetfile, sizeof(targetfile), "oscam.lcd");
		get_tmp_dir_filename(temp_file, sizeof(temp_file), "oscam.lcd.tmp");
	}
	else
	{
		snprintf(targetfile, sizeof(targetfile), "%s%s", cfg.lcd_output_path, "/oscam.lcd");
		snprintf(temp_file, sizeof(temp_file), "%s%s.tmp", cfg.lcd_output_path, "/oscam.lcd");
	}

	int8_t iscccam = 0;
	int32_t seconds = 0, secs = 0, fullmins = 0, mins = 0, fullhours = 0, hours = 0, days = 0;
	time_t now;

	while(running)
	{
		now = time((time_t *)0);
		int16_t cnt = 0, idx = 0, count_r = 0, count_p = 0, count_u = 0;
		FILE *fpsave;

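		// write the complete status snapshot to a temporary file first; rename() further
		// below moves it over the previous oscam.lcd, so readers never see a half-written file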
		if((fpsave = fopen(temp_file, "w")))
		{

			idx = 0;
			int16_t i;
			char *type;
			char *label;
			char *status;

			// Statuslines start
			secs = 0;
			fullmins = 0;
			mins = 0;
			fullhours = 0;
			hours = 0;
			days = 0;

			seconds = now - first_client->login;
			secs = seconds % 60;
			if(seconds > 60)
			{
				fullmins = seconds / 60;
				mins = fullmins % 60;
				if(fullmins > 60)
				{
					fullhours = fullmins / 60;
					hours = fullhours % 24;
					days = fullhours / 24;
				}
			}

			fprintf(fpsave, "Version: %s\n", CS_VERSION);
			fprintf(fpsave, "Revision: %s\n", CS_SVN_VERSION);
			if(days == 0)
				{ fprintf(fpsave, "up: %02d:%02d:%02d\n", hours, mins, secs); }
			else
				{ fprintf(fpsave, "up: %02dd %02d:%02d:%02d\n", days, hours, mins, secs); }
			fprintf(fpsave, "totals: %d/%d/%d/%d/%d/%d\n", first_client->cwfound, first_client->cwnot, first_client->cwignored, first_client->cwtout, first_client->cwcache, first_client->cwtun);
			fprintf(fpsave, "uptime: %d\n", seconds);
			// Statuslines end

			// Reader table header (w/s/b/e = EMM written/skipped/blocked/error)
			fprintf(fpsave, "Typ| Label      | Idle         | w | s | b | e | St\n");
			fprintf(fpsave, "---+------------+--------------+---+---+---+---+----\n");

			struct s_client *cl;

			// Reader/Proxy table start
			for(i = 0, cl = first_client; cl ; cl = cl->next, i++)
			{

				if((cl->typ == 'r' || cl->typ == 'p') && ((now - cl->last) < 20 || !cfg.lcd_hide_idle))
				{
					type = "";
					label = "";
					status = "OFF";
					secs = 0;
					fullmins = 0;
					mins = 0;
					fullhours = 0;
					hours = 0;
					days = 0;

					seconds = now - cl->last;

					if(cl->typ == 'r')
					{
						type = "R";
						idx = count_r;
						label = cl->reader->label;
						if(cl->reader->card_status == CARD_INSERTED)
							{ status = "OK"; }
						count_r++;
					}

					else if(cl->typ == 'p')
					{
						type = "P";
						iscccam = strncmp(client_get_proto(cl), "cccam", 5) == 0;
						idx = count_p;
						label = cl->reader->label;

						if(cl->reader->card_status == CARD_INSERTED)
							{ status = "CON"; }

						count_p++;
					}


					secs = seconds % 60;
					if(seconds > 60)
					{
						fullmins = seconds / 60;
						mins = fullmins % 60;
						if(fullmins > 60)
						{
							fullhours = fullmins / 60;
							hours = fullhours % 24;
							days = fullhours / 24;
						}
					}

					int16_t written = 0, skipped = 0, blocked = 0, error = 0;
					int16_t et;

					char emmtext[16] = "               ";
					if(cl->typ == 'r' || !iscccam)
					{
						// sum the EMM counters over all four EMM types
						// (separate index so the outer client loop counter 'i' is not clobbered)
						for(et = 0; et < 4; et++)
						{
							error += cl->reader->emmerror[et];
							blocked += cl->reader->emmblocked[et];
							skipped += cl->reader->emmskipped[et];
							written += cl->reader->emmwritten[et];
						}
						snprintf(emmtext, 16, "%3d|%3d|%3d|%3d",
								 written > 999 ? 999 : written,
								 skipped > 999 ? 999 : skipped,
								 blocked > 999 ? 999 : blocked,
								 error > 999 ? 999 : error);
					}
					else if(cl->typ == 'p' && iscccam)
					{
						if(!cccam_snprintf_cards_stat(cl, emmtext, 16))
							{ snprintf(emmtext, 16, "   No cards    "); }
					}

					if(days == 0)
					{
						fprintf(fpsave, "%s%d | %-10.10s |     %02d:%02d:%02d |%s| %s\n",
								type, idx, label, hours, mins,
								secs, emmtext, status);
					}
					else
					{
						fprintf(fpsave, "%s%d | %-10.10s |% 3dd %02d:%02d:%02d |%s| %s\n",
								type, idx, label, days, hours, mins,
								secs, emmtext, status);
					}
				}
			}

			fprintf(fpsave, "---+------------+--------------+---+---+---+--++----\n");
			// Reader/Proxy table end


			// Usertable start
			fprintf(fpsave, "Typ| Label      | Channel                     | Time\n");
			fprintf(fpsave, "---+------------+-----------------------------+-----\n");

			/*
			//Testclient
			fprintf(fpsave,"%s%d | %-10.10s | %-10.10s:%-17.17s| % 4d\n",
			        "U",
			        1,
			        "test",
			        "Sky De",
			        "Discovery Channel",
			        568);

			*/

			for(i = 0, cl = first_client; cl ; cl = cl->next, i++)
			{

				seconds = now - cl->lastecm;

				if(cl->typ == 'c' && seconds < 15)
				{
					type = "U";
					idx = count_u;
					label = cl->account->usr;
					count_u++;

					// resolve the current service name and print one user line
					get_servicename(cl, cl->last_srvid, cl->last_provid, cl->last_caid, channame, sizeof(channame));
					fprintf(fpsave, "%s%d | %-10.10s | %-10.10s:%-17.17s| % 4d\n",
							type,
							idx,
							label,
							get_cl_lastprovidername(cl),
							channame,
							cl->cwlastresptime);

				}
			}
			fprintf(fpsave, "---+------------+-----------------------------+-----\n");
			// Usertable end
			fclose(fpsave);
		}

		cs_sleepms(cfg.lcd_write_intervall * 1000);
		cnt++;

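		// publish the new snapshot: rename() replaces the previous oscam.lcd in a single step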
		if(rename(temp_file, targetfile) < 0)
			{ cs_log("An error occured while writing oscam.lcd file %s.", targetfile); }

	}

}
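In refresh_lcd_file() above, the days/hours/minutes/seconds breakdown is computed twice, once for the process uptime and once per reader, and the in-place version leaves hours and minutes at zero for elapsed times between 3600 and 3659 seconds because of the `fullmins > 60` test. One way to factor that out is a small helper like the sketch below; split_elapsed() is an invented name, not an existing OSCam function.

#include <stdio.h>
#include <time.h>

/* split an elapsed time in seconds into days, hours, minutes and seconds */
static void split_elapsed(time_t elapsed, int *days, int *hours, int *mins, int *secs)
{
	*secs  = (int)(elapsed % 60);
	*mins  = (int)((elapsed / 60) % 60);
	*hours = (int)((elapsed / 3600) % 24);
	*days  = (int)(elapsed / 86400);
}

int main(void)
{
	int d, h, m, s;

	split_elapsed(93784, &d, &h, &m, &s);        /* 1 day, 2 h, 3 min, 4 s */
	if (d == 0)
		printf("up: %02d:%02d:%02d\n", h, m, s);
	else
		printf("up: %02dd %02d:%02d:%02d\n", d, h, m, s);
	return 0;
}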
void *
sge_event_master_main(void *arg)
{
   bool do_endlessly = true;
   cl_thread_settings_t *thread_config = (cl_thread_settings_t*)arg;
   sge_gdi_ctx_class_t *ctx = NULL;
   monitoring_t monitor;
   monitoring_t *monitorp = &monitor;

   lListElem *report = NULL;
   lList *report_list = NULL;
   time_t next_prof_output = 0;

   DENTER(TOP_LAYER, "sge_event_master_main");

   DPRINTF(("started"));
   cl_thread_func_startup(thread_config);
   sge_monitor_init(&monitor, thread_config->thread_name, EDT_EXT, EMT_WARNING, EMT_ERROR);
   sge_qmaster_thread_init(&ctx, QMASTER, DELIVERER_THREAD, true);

   /* register at profiling module */
   set_thread_name(pthread_self(), "Deliver Thread");
   conf_update_thread_profiling("Deliver Thread");

   report_list = lCreateListHash("report list", REP_Type, false);
   report = lCreateElem(REP_Type);
   lSetUlong(report, REP_type, NUM_REP_REPORT_EVENTS);
   lSetHost(report, REP_host, ctx->get_qualified_hostname(ctx));
   lAppendElem(report_list, report);
 
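   /*
    * main delivery loop: wait until events are due, deliver them to the
    * subscribed clients, then pass the cancellation point at the end of
    * the loop body
    */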
   while (do_endlessly) {
      int execute = 0;

      thread_start_stop_profiling();

      /*
       * did a new event arrive which has a flush time of 0 seconds?
       */
      MONITOR_IDLE_TIME(sge_event_master_wait_next(), (&monitor), mconf_get_monitor_time(), 
                        mconf_is_monitor_message());

      MONITOR_MESSAGES((monitorp));
      MONITOR_EDT_COUNT((&monitor));
      MONITOR_CLIENT_COUNT((&monitor), lGetNumberOfElem(Event_Master_Control.clients));

      sge_event_master_process_requests(&monitor);
      sge_event_master_send_events(ctx, report, report_list, &monitor);
      sge_monitor_output(&monitor);

      thread_output_profiling("event master thread profiling summary:\n",
                              &next_prof_output);

      /* pthread cancellation point */
      pthread_cleanup_push((void (*)(void *))sge_event_master_cleanup_monitor,
                           (void *)&monitor);
      pthread_cleanup_push((void (*)(void *))sge_event_master_cleanup_report_list,
                           (void *)&report_list);
      cl_thread_func_testcancel(thread_config);
      pthread_cleanup_pop(execute); 
      pthread_cleanup_pop(execute); 
      if (sge_thread_has_shutdown_started()) {
         DPRINTF(("waiting for termination\n"));
         sleep(1);
      }
   }

   /*
    * Don't add cleanup code here; it will never be executed. Instead, register
    * a cleanup function with pthread_cleanup_push()/pthread_cleanup_pop()
    * around the call to cl_thread_func_testcancel().
    */

   DRETURN(NULL);
}
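Both thread mains above follow the same shape: block until work arrives, process it, emit monitoring/profiling output, then pass a cancellation point. The following is a stripped-down, runnable approximation of that structure using only POSIX primitives; the event queue is reduced to a counter, a plain shutdown flag stands in for pthread cancellation to keep the sketch short, and every name is invented for illustration rather than taken from the SGE sources.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
static int  pending_events = 0;
static bool shutdown_requested = false;

static void *deliver_thread(void *arg)
{
	(void)arg;
	for (;;) {
		pthread_mutex_lock(&lock);
		while (pending_events == 0 && !shutdown_requested)
			pthread_cond_wait(&cond, &lock);          /* idle time */
		int todo = pending_events;
		pending_events = 0;
		bool stop = shutdown_requested;
		pthread_mutex_unlock(&lock);

		if (todo > 0)
			printf("delivering %d event(s)\n", todo); /* stands in for sending events */
		if (stop)
			break;                                    /* cooperative shutdown */
	}
	return NULL;
}

static void post_events(int n)
{
	pthread_mutex_lock(&lock);
	pending_events += n;
	pthread_cond_signal(&cond);
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	pthread_t t;
	pthread_create(&t, NULL, deliver_thread, NULL);

	post_events(3);
	post_events(1);

	pthread_mutex_lock(&lock);
	shutdown_requested = true;
	pthread_cond_signal(&cond);
	pthread_mutex_unlock(&lock);

	pthread_join(t, NULL);
	return 0;
}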