Code example #1
File: rpl-timers.c Project: AWGES/StormSAMR21
/*---------------------------------------------------------------------------*/
static void
schedule_dao(rpl_instance_t *instance, clock_time_t latency)
{
  clock_time_t expiration_time;

  if(rpl_get_mode() == RPL_MODE_FEATHER) {
    return;
  }

  expiration_time = etimer_expiration_time(&instance->dao_timer.etimer);

  if(!etimer_expired(&instance->dao_timer.etimer)) {
    PRINTF("RPL: DAO timer already scheduled\n\r");
  } else {
    if(latency != 0) {
      expiration_time = latency / 2 +
        (random_rand() % (latency));
    } else {
      expiration_time = 0;
    }
    PRINTF("RPL: Scheduling DAO timer %u ticks in the future\n\r",
           (unsigned)expiration_time);
    ctimer_set(&instance->dao_timer, expiration_time,
               handle_dao_timer, instance);

    set_dao_lifetime_timer(instance);
  }
}
Code example #2
void
rpl_schedule_dao(rpl_dag_t *dag)
{
  clock_time_t expiration_time;

  expiration_time = etimer_expiration_time(&dag->dao_timer.etimer);

  if(!etimer_expired(&dag->dao_timer.etimer)) {
    PRINTF("RPL: DAO timer already scheduled");
  } else {
    expiration_time = DEFAULT_DAO_LATENCY / 2 +
      (random_rand() % (DEFAULT_DAO_LATENCY));
    PRINTF("RPL: Scheduling DAO timer Xu ticks in the future");//, (unsigned)expiration_time);
    ctimer_set(&dag->dao_timer, expiration_time,
               handle_dao_timer, dag);
  }
}
Code example #3
void
rpl_schedule_dao(rpl_instance_t *instance)
{
  clock_time_t expiration_time;

  expiration_time = etimer_expiration_time(&instance->dao_timer.etimer);

  if(!etimer_expired(&instance->dao_timer.etimer)) {
    PRINTF("RPL: DAO timer already scheduled\n");
  } else {
    expiration_time = RPL_DAO_LATENCY / 2 +
      (random_rand() % (RPL_DAO_LATENCY));
    PRINTF("RPL: Scheduling DAO timer %u ticks in the future\n",
           (unsigned)expiration_time);
    ctimer_set(&instance->dao_timer, expiration_time,
               handle_dao_timer, instance);
  }
}
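The three schedule_dao()/rpl_schedule_dao() variants above all apply the same jitter: the DAO is delayed by a value drawn from roughly [latency/2, latency*3/2) so that neighbouring nodes do not all transmit at the same instant. A minimal sketch of that pattern is below; EXAMPLE_LATENCY, example_timer and example_callback are hypothetical names, while ctimer_set(), random_rand() and CLOCK_SECOND are the Contiki calls used in the examples.

#include "contiki.h"
#include "sys/ctimer.h"
#include "lib/random.h"
#include <stdio.h>

/* Hypothetical base latency, in clock ticks. */
#define EXAMPLE_LATENCY (4 * CLOCK_SECOND)

static struct ctimer example_timer;

static void
example_callback(void *ptr)
{
  printf("delayed work fired\n");
}

static void
schedule_with_jitter(void)
{
  /* Delay drawn from [EXAMPLE_LATENCY/2, EXAMPLE_LATENCY*3/2),
     mirroring the latency/2 + random_rand() % latency pattern above. */
  clock_time_t delay = EXAMPLE_LATENCY / 2 +
    (random_rand() % EXAMPLE_LATENCY);
  ctimer_set(&example_timer, delay, example_callback, NULL);
}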
Code example #4
PROCESS_THREAD(doorAutoOpeningProcess, ev, data)
{
	static struct etimer initialDelay;
	static struct etimer blinkingTimer;
	static int blinkings;
	static clock_time_t remainingDelay;
	
	PROCESS_BEGIN();
		
		printf("Door auto opening: started\n");

		remainingDelay = DOOR_AUTO_OPENING_DELAY * CLOCK_SECOND;

		while(1)
		{
			printf("Door auto opening: waiting initial delay\n");
			etimer_set(&initialDelay, remainingDelay);
			PROCESS_WAIT_EVENT();
			if(ev == PROCESS_EVENT_TIMER && etimer_expired(&initialDelay))
				break;
			else if( ev == alarm_toggled_event)
			{
				printf("Door auto opening: delay interrupted by alarm\n");
				remainingDelay = etimer_expiration_time(&initialDelay) - clock_time();
				etimer_stop(&initialDelay);
				PROCESS_WAIT_EVENT_UNTIL(ev == alarm_toggled_event);
				printf("Door auto opening: alarm stopped, resuming delay\n");
			}
		}
		
		printf("Door auto opening: door opened\n");
		setLock(UNLOCKED);
		
		printf("Door auto opening: blinking started\n");
		
		blinkings = 0;
		
		leds_on(LEDS_BLUE);
		
		while(blinkings < AUTO_OPENING_BLINKINGS - 1)
		{
			etimer_set(&blinkingTimer, (AUTO_OPENING_LED_PERIOD / 2) * CLOCK_SECOND);
			PROCESS_WAIT_EVENT();
			if(ev == PROCESS_EVENT_TIMER && etimer_expired(&blinkingTimer))
			{
				printf("Door auto opening: blinking\n");
				leds_toggle(LEDS_BLUE);
				etimer_reset(&blinkingTimer);
				blinkings++;
			}
			else if(ev == alarm_toggled_event)
			{
				printf("Door auto opening: blinking interrupted by alarm\n");
				etimer_stop(&blinkingTimer);
				PROCESS_WAIT_EVENT_UNTIL(ev == alarm_toggled_event);
				printf("Door auto opening: alarm stopped, resuming blinking\n");
			}
		}
		
		printf("Door auto opening: blinking stopped\n");
		printf("Gate auto opening: door locked\n");
		setLock(LOCKED);
	PROCESS_END();
}
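Example #4 pauses a pending delay when the alarm toggles by saving etimer_expiration_time(&initialDelay) - clock_time() and later re-arming the timer with the remainder. Below is a minimal sketch of just that pause/resume step; the process name and pause_event are hypothetical (in the example the event is posted by a separate alarm process), while the etimer/clock calls are the ones used above.

#include "contiki.h"
#include "sys/etimer.h"
#include <stdio.h>

PROCESS(resumable_delay_process, "Resumable delay demo");
AUTOSTART_PROCESSES(&resumable_delay_process);

/* Hypothetical event posted by another process to pause/resume the delay. */
static process_event_t pause_event;

PROCESS_THREAD(resumable_delay_process, ev, data)
{
  static struct etimer delay_timer;
  static clock_time_t remaining;

  PROCESS_BEGIN();

  pause_event = process_alloc_event();
  remaining = 10 * CLOCK_SECOND;

  while(1) {
    etimer_set(&delay_timer, remaining);
    PROCESS_WAIT_EVENT();
    if(ev == PROCESS_EVENT_TIMER && etimer_expired(&delay_timer)) {
      break;                        /* the full delay has elapsed */
    } else if(ev == pause_event) {
      /* Save how much of the delay is left, stop the timer, and wait
         for a second pause_event before resuming from the remainder. */
      remaining = etimer_expiration_time(&delay_timer) - clock_time();
      etimer_stop(&delay_timer);
      PROCESS_WAIT_EVENT_UNTIL(ev == pause_event);
    }
  }

  printf("delay completed\n");

  PROCESS_END();
}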
Code example #5
PROCESS_THREAD(proc_epoch_syncer, ev, data) {
	static struct etimer send_timer;
	static struct etimer epoch_timer;
	static const struct broadcast_callbacks broadcast_cbs = {__broadcast_recv_cb, __broadcast_sent_cb};
	static struct broadcast_conn conn;

	PROCESS_EXITHANDLER(broadcast_close(&conn));

	PROCESS_BEGIN();


#ifdef TRACK_CONNECTIONS
	/* Log the node id */
	printf("board-id64 0x%.16llx\n", board_get_id64());
#endif
#ifdef XFER_CRC16
	/* Log that sync packets carry a CRC16 check */
	printf("xfer crc16\n");
#endif
	printf("epoch interval %ld ticks\n", EPOCH_INTERVAL);

	/*
	 * Alloc the two syncer events
	 */
	evt_epoch_synced = process_alloc_event();
	evt_end_of_epoch = process_alloc_event();

	/*
	 * Open a `connection` on the syncer broadcasting channel
	 */
	broadcast_open(&conn, BROADCAST_CHANNEL_TIMESYNC, &broadcast_cbs);

	/*
	 * init the epoch-syncer instance
	 */
	epoch_syncer_init(&__epoch_syncer);

	/*
	 * This is the main syncer loop. Initially we try to sync the
	 * epoch between nodes without concurrently running any other
	 * algorithm. After an initial period, by which time the network
	 * should be synced, we start generating epoch events which can
	 * be consumed by, e.g., the estimator process.
	 */
	etimer_set(&epoch_timer, __epoch_syncer.epoch_interval);
	__epoch_syncer.epoch_start_time = clock_time();
	__epoch_syncer.epoch_end_time = etimer_expiration_time(&epoch_timer);
	while (1) {
		/*
		 * The start of a new epoch !
		 */
		epoch_syncer_at_epoch_start(&__epoch_syncer);


		clock_time_t now;
		clock_time_t time_to_epoch_end;
			
		now = clock_time();

		assert(__epoch_syncer.epoch_end_time == etimer_expiration_time(&epoch_timer));
		assert(__epoch_syncer.epoch_end_time > now);
		time_to_epoch_end = __epoch_syncer.epoch_end_time - now;

		/* 
		 * Setup a random wait time before sending the sync packet
		 *
		 * ! we cannot let send_timer delay the epoch_timer, especially
		 *   when the next `end-of-epoch-time` has been anticipated by a lot
		 *   (this can happen at startup)
		 */
		if (time_to_epoch_end > __epoch_syncer.epoch_sync_start) {
			long int send_wait;
			long int send_wait_rnd;
			long int rnd;

			rnd = rand();
			send_wait_rnd = (unsigned)rnd % (unsigned) __epoch_syncer.epoch_sync_xfer_interval;
			send_wait = __epoch_syncer.epoch_sync_start + send_wait_rnd;
			assert(send_wait >= __epoch_syncer.epoch_sync_start);
			assert(send_wait <= __epoch_syncer.epoch_sync_start + __epoch_syncer.epoch_sync_xfer_interval);

			if (send_wait > time_to_epoch_end)
				send_wait = __epoch_syncer.epoch_sync_start;

			assert(send_wait < time_to_epoch_end);
			etimer_set(&send_timer, send_wait);

			PROCESS_WAIT_UNTIL(etimer_expired(&send_timer));

			/*
			 * Acquire the radio lock
			 *
			 * ! we don't use WAIT/YIELD_UNTIL() because
			 *   1) we do not want to yield if we can acquire the lock on the first try
			 *   2) no kernel signal is generated when the lock is released (we would `deadlock')
			 */
			do {
				if (!radio_trylock())
					break;

				PROCESS_PAUSE();
			} while (1);


			{
				clock_time_t now;
				struct epoch_sync_packet packet;
					
				/*
				 * broadcast the sync packet
				 *
				 * ! We put this part into its own block since non static stack
				 * variables/allocations in the parent block wouldn't get preserved through
				 * kernel calls (e.g. the PROCESS_PAUSE() a few lines above)
				 */
#ifdef TRACK_CONNECTIONS
				packet.board_id16 = board_get_id16();
#endif
				packet.epoch = __epoch_syncer.epoch;

				now = clock_time();
				assert(now > __epoch_syncer.epoch_start_time);
				assert(__epoch_syncer.epoch_end_time > now);
				packet.time_from_epoch_start = now - __epoch_syncer.epoch_start_time;
				packet.time_to_epoch_end = __epoch_syncer.epoch_end_time - now;

				
#ifdef XFER_CRC16
				/*
				 * Compute the packet crc with the .crc16 field zeroed
				 */
				{
					uint16_t crc16;

					packet.crc16 = 0;
					crc16 = crc16_data((const unsigned char *)&packet,  sizeof(struct epoch_sync_packet), 0);

					packet.crc16 = crc16;
				}
#endif
				packetbuf_copyfrom(&packet, sizeof(struct epoch_sync_packet));
				broadcast_send(&conn);
			}
		} else {
			printf("epoch-syncer: skipping sync send\n");
		}
			

		/*
		 * We cannot YIELD here: if epoch_timer has already expired there won't be
		 * any event to wake us up.
		 *
		 * FIXME: if we get here and the epoch timer has already fired,
		 * print by how much we are late: this can be terribly useful
		 * to trace bugs in the epoch sync code or the kernel.
		 */
		if (etimer_expired(&epoch_timer)) {
			long int now;

			now = clock_time();
			assert(now > __epoch_syncer.epoch_end_time);
			
		} else {
			char do_wait;
			do_wait = 1;

			if (__epoch_syncer.sum_sync_offsets) {
				long int avg_offset = __epoch_syncer.sum_sync_offsets / __epoch_syncer.nr_offsets;
				const long int threshold = CLOCK_SECOND;

				if (avg_offset > threshold) {
					/*
					 * if we are late don't wait until the timer expires
					 * ! this might give us the opportunity to re-enter the right sync_xfer_interval
					 */
					do_wait = 0;
				} else if (avg_offset < -threshold) {
					/*
					 * we are too fast, delay end of epoch
					 */
					clock_time_t now;
					clock_time_t time_to_epoch_end;
			
					now = clock_time();
					assert(__epoch_syncer.epoch_end_time == etimer_expiration_time(&epoch_timer));
					assert(__epoch_syncer.epoch_end_time > now);
					time_to_epoch_end = __epoch_syncer.epoch_end_time - now;

					long int delay = time_to_epoch_end + (-avg_offset/2);

					static struct etimer delay_timer;
					trace("epoch-syncer: delaying end-of-epoch by %ld ticks\n", (-avg_offset/2));
					etimer_set(&delay_timer, delay);
					__epoch_syncer.epoch_end_time += (-avg_offset/2);

					PROCESS_WAIT_UNTIL(etimer_expired(&delay_timer));
				}
			}

			if (do_wait) {
				PROCESS_WAIT_UNTIL(etimer_expired(&epoch_timer));
			} else {
				trace("epoch-syncer: not waiting for end-of-epoch\n");
			}
		}
		trace("epoch-syncer: epoch %d ended\n",  __epoch_syncer.epoch);

#ifdef TRACK_CONNECTIONS
		connection_print_and_zero(CONNECTION_TRACK_SYNC, __epoch_syncer.epoch);
#endif

		/*
		 * Re-Set the end-of-epoch timer
		 */
		if (__epoch_syncer.epoch == EPOCHS_UNTIL_SYNCED) {
			/*
			 * We have hopefully achieved sync at this point
			 *
			 * 1) update the epoch timings, and set the epoch timer
			 *
			 * 2) signal the size-estimator process that the epoch is now synced
			 */
			__epoch_syncer.epoch_interval = EPOCH_INTERVAL;
			__epoch_syncer.epoch_sync_start = EPOCH_SYNC_START;
			__epoch_syncer.epoch_sync_xfer_interval = EPOCH_SYNC_XFER_INTERVAL;

			etimer_stop(&epoch_timer);
			etimer_set(&epoch_timer, __epoch_syncer.epoch_interval);
			/*
			 * The epoch timer has been re-set: update the time until the next epoch end
			 * Increase the epoch count.
			 * ! these operations must happen in a block which cannot block in kernel calls
			 */
			__epoch_syncer.epoch_start_time = clock_time();
			__epoch_syncer.epoch_end_time = etimer_expiration_time(&epoch_timer);
			__epoch_syncer.epoch++;

			process_post(&proc_size_estimator, evt_epoch_synced, NULL);
		} else {
			/*
			 * Re-set and adjust the epoch timer using the data received through sync packets
			 * (in this epoch)
			 *
			 * ! using re-set (instead of, e.g., restart) is important here in order to avoid
			 *   drifting
			 */ 
			etimer_reset(&epoch_timer);

			/*
			 * The epoch timer has been re-set: update the time until the next epoch end
			 * Increase the epoch count.
			 * ! these operations must happen in a block which cannot block in kernel calls
			 */
			//__epoch_syncer.epoch_start_time = epoch_timer.timer.start;
			__epoch_syncer.epoch_start_time = clock_time();
			__epoch_syncer.epoch_end_time = etimer_expiration_time(&epoch_timer);
			__epoch_syncer.epoch++;
			if (__epoch_syncer.sum_sync_offsets) {
				long int avg_offset = __epoch_syncer.sum_sync_offsets / __epoch_syncer.nr_offsets;
				const long int threshold = 1;//(CLOCK_SECOND/32);//*3;

#if __CONTIKI_NETSTACK_RDC==__CONTIKI_NETSTACK_RDC_NULL
				const int tx_delay = 0;
#elif __CONTIKI_NETSTACK_RDC==__CONTIKI_NETSTACK_RDC_CXMAC
				/*
				 * When the cxmac RDC is used we must consider an added delay due to the fact that when
				 * other nodes radios are turned off the sync packet must be re-sent.
				 */
				const int tx_delay = 8;
#endif

				/*
				 * estimate the avg tx delay
				 */
				avg_offset += tx_delay;

				trace("epoch-syncer: sync offsets %d ~ %ld < %ld < %ld\n", __epoch_syncer.nr_offsets,  __epoch_syncer.min_offset + tx_delay, avg_offset, __epoch_syncer.max_offset+tx_delay);
				
				if ((avg_offset < -threshold) || (avg_offset > threshold)) {
					clock_time_t new_expiration_time;

					const long int adjust_threshold = CLOCK_SECOND/2;
					long int adjust;
		
					/*
					 * feedback control the next expiration time
					 */
					adjust = -avg_offset/2;
					adjust = min(adjust, adjust_threshold);
					adjust = max(adjust, -adjust_threshold);

					if (adjust)
						etimer_adjust(&epoch_timer, adjust);
						
					new_expiration_time = etimer_expiration_time(&epoch_timer);
					__epoch_syncer.epoch_end_time = new_expiration_time;
				}
			}

			if (__epoch_syncer.epoch > EPOCHS_UNTIL_SYNCED) {
				/*
				 * Signal the estimator-process that this epoch has ended
				 */
				process_post(&proc_size_estimator, evt_end_of_epoch, NULL);
			}
		}
	}

	PROCESS_END();
}
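Example #5 keeps its epoch period free of drift by re-arming the expired timer with etimer_reset(), which restarts from the previous expiration time rather than from "now", and then nudging the next expiration with etimer_adjust() based on the averaged sync offsets. A compact sketch of that reset/adjust pattern, assuming a hypothetical placeholder correction in place of the real offset average, might look like this:

#include "contiki.h"
#include "sys/etimer.h"
#include <stdio.h>

#define PERIOD (10 * CLOCK_SECOND)

PROCESS(periodic_adjust_process, "Drift-free periodic timer demo");
AUTOSTART_PROCESSES(&periodic_adjust_process);

PROCESS_THREAD(periodic_adjust_process, ev, data)
{
  static struct etimer period_timer;
  static int correction;

  PROCESS_BEGIN();

  etimer_set(&period_timer, PERIOD);

  while(1) {
    PROCESS_WAIT_UNTIL(etimer_expired(&period_timer));

    /* Re-arm from the previous expiration time so the period does not drift. */
    etimer_reset(&period_timer);

    /* Hypothetical correction (e.g. derived from received sync offsets, as in
       the syncer above); a positive value pushes the next expiration later. */
    correction = 0;
    if(correction != 0) {
      etimer_adjust(&period_timer, correction);
    }

    printf("next expiration at %lu ticks\n",
           (unsigned long)etimer_expiration_time(&period_timer));
  }

  PROCESS_END();
}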