Example #1
File: core.c Project: k0a1a/pom-ng
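/* Per-thread packet processing loop: waits on its own packet queue, dequeues
 * one packet at a time, wakes the input threads once the queue drains below
 * CORE_THREAD_PKT_QUEUE_MIN, and runs packet and timer processing under the
 * core processing read lock. */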
void *core_processing_thread_func(void *priv) {

	struct core_processing_thread *tpriv = priv;

	if (packet_info_pool_init()) {
		halt("Error while initializing the packet_info_pool", 1);
		return NULL;
	}

	registry_perf_inc(perf_thread_active, 1);

	pom_mutex_lock(&tpriv->pkt_queue_lock);

	while (core_run) {
		
		while (!tpriv->pkt_queue_head) {
			// We are not active while waiting for a packet
			registry_perf_dec(perf_thread_active, 1);

			debug_core("thread %u : waiting", tpriv->thread_id);

			if (registry_perf_getval(perf_thread_active) == 0) {
				if (core_get_state() == core_state_finishing)
					core_set_state(core_state_idle);
			}

			if (!core_run) {
				pom_mutex_unlock(&tpriv->pkt_queue_lock);
				goto end;
			}

			int res = pthread_cond_wait(&tpriv->pkt_queue_cond, &tpriv->pkt_queue_lock);
			if (res) {
				pomlog(POMLOG_ERR "Error while waiting for restart condition : %s", pom_strerror(res));
				abort();
				return NULL;
			}
			registry_perf_inc(perf_thread_active, 1);
		}


		// Dequeue a packet
		struct core_packet_queue *tmp = tpriv->pkt_queue_head;
		tpriv->pkt_queue_head = tmp->next;
		if (!tpriv->pkt_queue_head)
			tpriv->pkt_queue_tail = NULL;


		// Add it to the unused list
		tmp->next = tpriv->pkt_queue_unused;
		tpriv->pkt_queue_unused = tmp;

		tpriv->pkt_count--;

		registry_perf_dec(perf_pkt_queue, 1);

		__sync_fetch_and_sub(&core_pkt_queue_count, 1);

		if (tpriv->pkt_count < CORE_THREAD_PKT_QUEUE_MIN) {

			pom_mutex_lock(&core_pkt_queue_wait_lock);
			// Tell the input processes that they can continue queuing packets
			int res = pthread_cond_broadcast(&core_pkt_queue_wait_cond);
			if (res) {
				pomlog(POMLOG_ERR "Error while signaling the main pkt_queue condition : %s", pom_strerror(res));
				abort();
			}
			pom_mutex_unlock(&core_pkt_queue_wait_lock);
		}

		// Keep track of our packet
		struct packet *pkt = tmp->pkt;

		debug_core("thread %u : Processing packet %p (%u.%06u)", tpriv->thread_id, pkt, pom_ptime_sec(pkt->ts), pom_ptime_usec(pkt->ts));
		pom_mutex_unlock(&tpriv->pkt_queue_lock);

		// Lock the processing lock
		pom_rwlock_rlock(&core_processing_lock);

		// Update the current clock
		if (core_clock[tpriv->thread_id] < pkt->ts) // Make sure the clock stays monotonic
			core_clock[tpriv->thread_id] = pkt->ts;

		//pomlog(POMLOG_DEBUG "Thread %u processing ...", pthread_self());
		if (core_process_packet(pkt) == POM_ERR) {
			core_run = 0;
			pom_rwlock_unlock(&core_processing_lock);
			break;
		}

		// Process timers
		if (timers_process() != POM_OK) {
			pom_rwlock_unlock(&core_processing_lock);
			break;
		}

		pom_rwlock_unlock(&core_processing_lock);

		if (packet_release(pkt) != POM_OK) {
			pomlog(POMLOG_ERR "Error while releasing the packet");
			break;
		}
		
		debug_core("thread %u : Processed packet %p (%u.%06u)", tpriv->thread_id, pkt, pom_ptime_sec(pkt->ts), pom_ptime_usec(pkt->ts));
		// Re-lock our queue for the next run
		pom_mutex_lock(&tpriv->pkt_queue_lock);

	}

	halt("Processing thread encountered an error", 1);
end:
	packet_info_pool_cleanup();

	return NULL;
}
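The producer side is not shown in this example. As a minimal sketch of the enqueue half of the pattern, assuming simplified stand-in types rather than pom-ng's actual structures (pkt_queue_push and its struct fields are hypothetical), a producer links the entry at the tail under the same mutex and signals the condition variable the consumer above waits on:

#include <pthread.h>
#include <stdlib.h>

/* Simplified stand-ins for the queue types; names are hypothetical. */
struct pkt_queue_entry {
	void *pkt;
	struct pkt_queue_entry *next;
};

struct pkt_queue {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	struct pkt_queue_entry *head, *tail;
};

/* Append an entry and wake one waiting consumer. Mirrors the
 * lock -> link at tail -> signal -> unlock order the thread above relies on. */
static int pkt_queue_push(struct pkt_queue *q, void *pkt)
{
	struct pkt_queue_entry *e = malloc(sizeof(*e));
	if (!e)
		return -1;
	e->pkt = pkt;
	e->next = NULL;

	pthread_mutex_lock(&q->lock);
	if (q->tail)
		q->tail->next = e;
	else
		q->head = e;
	q->tail = e;
	/* The consumer re-checks q->head in a while loop, so a single
	 * signal is enough even with spurious wakeups. */
	pthread_cond_signal(&q->cond);
	pthread_mutex_unlock(&q->lock);
	return 0;
}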
Example #2
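/* cis_run: the reactor's main loop. Polls the socket engine for read/write
 * events, drains readable sockets into per-connection recvqs, services a
 * bounded slice of the global receive queue, then reaps dead connections
 * and runs timers. */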
void cis_run (void)
{
  int temp_int = 0, temp_int2 = 0;
  int i = 0, r, w;
  int eventcount = 0;
  connection *read_events[__MAXFDS__];
  connection *write_events[__MAXFDS__];

  fifo_root *global_recvq = NULL;
  linklist_iter *reaper_iter = NULL;

  connection *temp = NULL;
  char *line;

  int patience = 250;

  reactor_running = true;

  global_recvq = fifo_create();

  /* main loop */
  for(;reactor_running;)
  {

    /** Grab some socket events to play with */
    eventcount = socketengine->wait(read_events, write_events, patience);
    r = w = 0;

    /** Run through the existing connections looking for data to read */
    for(i = 0; (r+w) < eventcount; i++)
    {
      if (read_events[i] != NULL)
      {
        r++;
        temp_int = read_events[i]->recvq->queue_size;
        conn_read_to_recvq(read_events[i]);
        if ((read_events[i]->recvq->queue_size > temp_int) && ((equal_fairness == 0) || (temp_int == 0)))
          fifo_add(global_recvq, read_events[i]);
        /* Don't wait for data! too much to do! */
        patience = 0;
      }
      if (write_events[i] != NULL)
      {
        w++;
        if (write_events[i]->state.connecting == 1)
        {
            socklen_t errlen = sizeof(temp_int);
            /* Fetch the deferred connect() result. The call must stay outside
             * assert() (it would be compiled away under NDEBUG) and needs the
             * option length initialized to the buffer size, not left at 0. */
            temp_int2 = getsockopt(write_events[i]->fd, SOL_SOCKET, SO_ERROR, &temp_int, &errlen);
            assert(temp_int2 == 0);
            if (temp_int == 0)
            {
                write_events[i]->connected(write_events[i]);
                write_events[i]->state.connecting = 0;
            }
            else
            {
                write_events[i]->connect_failed(write_events[i], temp_int);
                write_events[i]->state.remote_closed = 1;
                write_events[i]->state.local_read_shutdown = 1;
                write_events[i]->state.local_write_shutdown = 1;
                socketengine->del(write_events[i]);
                cis_reap_connection(write_events[i]);
            }
        }
        else
            conn_send_from_sendq(write_events[i]);
      }
    } // foreach (event)

    /** Process some of the readq */

    if (global_recvq->members > 0)
    {
      for (i = 0; i <= 20; i++)
      {
        temp = fifo_pop(global_recvq);
        if (temp == NULL)
        {
          /* We seem to be out of connections to process... Have more patience waiting for new data ...*/
          patience = 250;
          break;
        }
        if ((temp->state.local_read_shutdown == 0) && (temp->recvq->queue_size > 0))
        {
          /* Local dead connections don't get processed ... remote dead ones -do- (since they died after sending this...) */
          if (temp->callback_read)
            temp->callback_read(temp);
          else
            buffer_empty(temp->recvq);

          /* If it doesn't have any more messages left, nuke it from the queue... */
          if (temp->recvq->queue_size == 0)
            fifo_del(global_recvq, temp);
          else
          {
            if (equal_fairness == 1)
            {
              fifo_del(global_recvq, temp);
              fifo_add(global_recvq, temp);
            }
            else
            {
              /* We shove it back on the end of the queue,
               * just in case there are insufficient instances to cover the buffer content.
               *
               * While this behaviour won't break the code, it isn't true first-come-first-served,
               * and it will have a performance impact.
               *
               * TODO: It would be nice to have an alternative ...
               * possibly making conn_read_to_recvq return a line count to allow multiple additions
               */
              fifo_add(global_recvq, temp);
            }
          }

          if (global_recvq->members == 0)
          {
            patience = 250; /* Nothing to do, so I don't mind waiting a while */
          }
          break;
        }
        else
        {
          /* So this is either locally dead, or has no recvq ... both could happen, but ignore it either way */
          if (temp->recvq->queue_size > 0)
          {
            /* Must be locally dead but with a recvq still ... this shouldn't be possible? */
            buffer_empty(temp->recvq);
          }

          /* In any case, if we're ignoring it, might as well remove it completely ... */
          fifo_del(global_recvq, temp);

          /* Since we didn't do much with this one, we'll try another pass... */
          i--;
        }
      }
    }

    /** Attempt to reap connections... */
    reaper_iter = linklist_iter_create(reaper_list);
    while ((temp = linklist_iter_next(reaper_iter)) != NULL)
    {
      /* Has this connection reached the end of its life? */
      if (((temp->state.remote_closed == 1) && (temp->recvq->queue_size == 0)) ||
          ((temp->state.local_read_shutdown == 1) && (temp->sendq->queue_size == 0)))
      {
        /* Remove it from any relevant queues... */
        linklist_iter_del(reaper_iter);
        fifo_del(global_recvq,temp);
        /* Make sure it's all closed down... */
        temp->state.local_write_shutdown = 1;
        temp->close(temp);
        /* Free the important stuff.... */
        if (temp->recvq)			buffer_free(temp->recvq);
        if (temp->sendq)			buffer_free(temp->sendq);
        sfree(temp);
      }
    }
    linklist_iter_free(reaper_iter);

    timers_process();
  }
  return;
}
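When equal_fairness is enabled, the loop above moves the serviced connection to the back of the queue so one chatty connection cannot starve the rest. A minimal self-contained sketch of that rotation, using a simplified singly-linked FIFO rather than the project's actual fifo_* implementation:

#include <stddef.h>

struct node { void *data; struct node *next; };
struct fifo { struct node *head, *tail; };

/* Move the head element to the tail: the connection just serviced goes
 * to the back, so every queued connection gets one turn per pass. */
static void fifo_rotate(struct fifo *f)
{
  struct node *n = f->head;
  if (!n || !n->next)
    return; /* empty or single-element queue: nothing to rotate */
  f->head = n->next;
  n->next = NULL;
  f->tail->next = n;
  f->tail = n;
}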
Example #3
/** Main function of the program
*/
int main(void)
{
	FuncPtr FPtr;

	ioinit();

	hd44780_clear();
	hd44780_bl_on();
	hd44780_cursor_off();

	load_basic_settings(EEPROM_BASIC_SETTINGS_BEGIN);
	load_temp_sensors_settings(EEPROM_TEMP_SETTINGS_BEGIN);
	load_outputs_settings(EEPROM_OUTS_SETTING_BEGIN);
	load_timers_settings(EEPROM_TIMERS_SETTING_BEGIN);
	load_timersv_settings(EEPROM_TIMERSV_SETTING_BEGIN);
	load_topoff_settings(EEPROM_TOPOFF_SETTINGS_BEGIN);

	hd44780_printOnLcdDelay(PSTR(PROGRAM_FULL_NAME));
	hd44780_clear();
	sei();

	rtc_get_time();
	rtc_get_date();

	gui_client_present();

	log_write_record(LOG_EVENT_START,0,0,0);

	for (;;) {

		wdt_reset();

		qbuttons_process();

		ui_key_pressed();

		if (key_pressed && alarms_notification_enabled() && alarms_is_active() ){
			alarms_block_notification();
			menu_set(1,1,0,0);
			key_pressed = NONE;
		}

		//########################### actions performed once a second has passed
		if (SF(FLAG_SECOND_PAST)) {
			SSF(FLAG_FORCE_LCD_REFRESH);
			temp_counter++;
			top_off_check_time();
			top_off_process();
			CSF(FLAG_SECOND_PAST);
		}

		//########################### actions performed once half a second has passed
		if (SF(FLAG_HALF_SECOND_PAST)) {
			SSF(FLAG_FORCE_LCD_SEMI_REFRESH);
			if (alarms_is_active() && alarms_notification_enabled()) {
				buzzer_toggle();
			} else {
				buzzer_off();
			}
			CSF(FLAG_HALF_SECOND_PAST);
		}

		//######################################## LCD display refresh
		if (system_flags & LCD_REFRESH_MASK) {
			//if (back_light_counter < LCD_BL_SHUTOFF_TIME) { hd44780_bl_on(); }
			FPtr=(FuncPtr)pgm_read_word(&FuncPtrTable[menu_func_index()]);
			hd44780_outcmd(HD44780_HOME);
			FPtr();
			menu_dummy();
			CSF(FLAG_FORCE_LCD_REFRESH);
			CSF(FLAG_FORCE_LCD_SEMI_REFRESH);
		}

		//########################### actions performed once a minute has passed
		if (SF(FLAG_MINUTE_PAST)) {
			//if (back_light_counter < 0xFF) { back_light_counter++; }
			//if (back_light_counter >= LCD_BL_SHUTOFF_TIME) { hd44780_bl_off(); }
			timers_process();
			timersv_process();
			if (alarms_block_notification_counter < ALARM_MAX_BLOCK_VALUE) {
				alarms_block_notification_counter++;
			}
			CSF(FLAG_MINUTE_PAST);
		}

		//########################### actions performed once an hour has passed
		if (SF(FLAG_HOUR_PAST)) {
			temp_register();
			CSF(FLAG_HOUR_PAST);
		}

		//########################### actions performed once a day has passed
		if (SF(FLAG_DAY_PAST)) {
			rtc_get_time();
			rtc_get_date();
			CSF(FLAG_DAY_PAST);
		}

		//########################### temperature measurement
		if (temp_counter > TEMP_SENSOR_READ_INTERVAL) {
			temp_read_temperature();
			temp_counter = 0;
		}

		//######################################## serial interface handling
		gui_cm_process_char();

		//######################################## output state update
		outputs_update();
	}
	return 0;
}
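The SF/SSF/CSF macros test, set, and clear bits in system_flags, and a timer interrupt is expected to raise the elapsed-time flags this loop consumes and clears. A minimal sketch of that convention, assuming a 1 Hz tick; the macro definitions, bit position, and ISR name below are illustrative, not the project's actual headers:

#include <stdint.h>

volatile uint8_t system_flags;

#define FLAG_SECOND_PAST 0                    /* bit position is illustrative */
#define SF(f)  (system_flags & (1 << (f)))    /* test flag  */
#define SSF(f) (system_flags |= (1 << (f)))   /* set flag   */
#define CSF(f) (system_flags &= ~(1 << (f)))  /* clear flag */

/* On AVR this body would live in a timer ISR() block. The interrupt only
 * sets the flag; the main loop acts on it and clears it, so each tick is
 * handled exactly once. */
void timer_tick_isr(void)
{
	SSF(FLAG_SECOND_PAST);
}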
Example #4
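/* Earlier variant of the processing loop: all threads share a single
 * core_pkt_queue_mutex, drain a global queue as well as their own, and
 * track activity with the core_thread_active counter. */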
void *core_processing_thread_func(void *priv) {

	struct core_processing_thread *tpriv = priv;

	pom_mutex_lock(&core_pkt_queue_mutex);

	while (core_run) {
		
		while (!core_pkt_queue_head && !tpriv->pkt_queue_head) {
			if (core_thread_active == 0) {
				if (core_get_state() == core_state_finishing)
					core_set_state(core_state_idle);
			}

			if (!core_run) {
				pom_mutex_unlock(&core_pkt_queue_mutex);
				return NULL;
			}

			int res = pthread_cond_wait(&core_pkt_queue_restart_cond, &core_pkt_queue_mutex);
			if (res) {
				pomlog(POMLOG_ERR "Error while waiting for restart condition : %s", pom_strerror(res));
				// Should probably abort here
				return NULL;
			}

		}
		core_thread_active++;

		struct core_packet_queue *tmp = NULL;
		
		// Dequeue packets from our own queue first
		struct packet *pkt = NULL;
		if (tpriv->pkt_queue_head) {
			tmp = tpriv->pkt_queue_head;
			pkt = tmp->pkt;

			tpriv->pkt_queue_head = tmp->next;
			if (tpriv->pkt_queue_head)
				tpriv->pkt_queue_head->prev = NULL;
			else
				tpriv->pkt_queue_tail = NULL;

		} else {
			tmp = core_pkt_queue_head;
			pkt = tmp->pkt;

			// Remove the packet from the main queue
			core_pkt_queue_head = tmp->next;
			if (core_pkt_queue_head)
				core_pkt_queue_head->prev = NULL;
			else
				core_pkt_queue_tail = NULL;
		}

		// Add it to the unused list
		memset(tmp, 0, sizeof(struct core_packet_queue));
		tmp->next = core_pkt_queue_unused;
		if (tmp->next)
			tmp->next->prev = tmp;
		core_pkt_queue_unused = tmp;

		core_pkt_queue_usage--;

		pom_mutex_unlock(&core_pkt_queue_mutex);

		// Lock the processing lock
		int res = pthread_rwlock_rdlock(&core_processing_lock);
		if (res) {
			pomlog(POMLOG_ERR "Error while locking the processing lock : %s", pom_strerror(res));
			abort();
			return NULL;
		}

		// Update the current clock
		pom_mutex_lock(&core_clock_lock);
		memcpy(&core_clock, &pkt->ts, sizeof(struct timeval));
		pom_mutex_unlock(&core_clock_lock);

		//pomlog(POMLOG_DEBUG "Thread %u processing ...", pthread_self());
		if (core_process_packet(pkt) == POM_ERR) {
			core_run = 0;
			halt("Packet processing encountered an error", 1);
			pthread_cond_broadcast(&core_pkt_queue_restart_cond);
			pthread_rwlock_unlock(&core_processing_lock);
			return NULL;
		}

		// Process timers
		if (timers_process() != POM_OK) {
			pthread_rwlock_unlock(&core_processing_lock);
			return NULL;
		}

		res = pthread_rwlock_unlock(&core_processing_lock);
		if (res) {
			pomlog(POMLOG_ERR "Error while releasing the processing lock : %s", pom_strerror(res));
			break;
		}

		if (packet_pool_release(pkt) != POM_OK) {
			pomlog(POMLOG_ERR "Error while releasing the packet to the pool");
			break;
		}
		
		pom_mutex_lock(&core_pkt_queue_mutex);
		res = pthread_cond_broadcast(&core_pkt_queue_restart_cond);
		if (res) {
			pomlog(POMLOG_ERR "Error while signaling the done condition : %s", pom_strerror(res));
			pom_mutex_unlock(&core_pkt_queue_mutex);
			break;

		}
		core_thread_active--;

	}
	pom_mutex_unlock(&core_pkt_queue_mutex);

	halt("Processing thread encountered an error", 1);
	return NULL;
}
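pthread_cond_wait, pthread_rwlock_rdlock, and related calls report failure through their return value and do not set errno, which is why the handlers above capture the result before formatting it. A minimal checked-wait helper sketching that convention:

#include <pthread.h>
#include <stdio.h>
#include <string.h>

/* Wait on a condition variable, reporting failure via the returned code.
 * pthread functions return 0 on success or an errno-style value directly;
 * they do NOT set the errno global. */
static int cond_wait_checked(pthread_cond_t *cond, pthread_mutex_t *lock)
{
	int res = pthread_cond_wait(cond, lock);
	if (res)
		fprintf(stderr, "pthread_cond_wait: %s\n", strerror(res));
	return res;
}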