/*---------------------------------------------------------------------------*
 * Routine:  sys_arch_sem_wait
 *---------------------------------------------------------------------------*
 * Description:
 *      Blocks the calling thread until the semaphore is signaled or until
 *      "timeout" milliseconds have elapsed.  A timeout of zero means wait
 *      forever.
 *
 *      lwIP implements a similarly named wrapper, sys_sem_wait(), on top of
 *      this function.
 * Inputs:
 *      sys_sem_t *sem    -- Semaphore to wait on
 *      u32_t timeout     -- Milliseconds until timeout (0 = wait forever)
 * Outputs:
 *      u32_t             -- Milliseconds spent waiting, or SYS_ARCH_TIMEOUT
 *                           if the semaphore was not signaled in time.
 *---------------------------------------------------------------------------*/
u32_t sys_arch_sem_wait(sys_sem_t *sem, u32_t timeout) {
    const u32_t wait_start = us_ticker_read();
    const uint32_t wait_ms = (timeout == 0) ? osWaitForever : timeout;

    /* osSemaphoreWait returns the number of available tokens (> 0) on
     * success; anything below 1 is treated as a timeout. */
    if (osSemaphoreWait(sem->id, wait_ms) < 1)
        return SYS_ARCH_TIMEOUT;

    return (us_ticker_read() - wait_start) / 1000;
}
/** Busy-wait for (at least) the given number of microseconds.
 *
 *  @param us  Microseconds to wait; expected non-negative.
 *
 *  Spins on the free-running 32-bit microsecond ticker; the unsigned
 *  subtraction (t - start) is wrap-safe.
 */
void wait_us(int us) {
    uint32_t start = us_ticker_read();
    /* Cast once so the loop comparison is unsigned/unsigned.  The old
     * `delta > us` compared uint32_t against int, relying on implicit
     * promotion; it also left a stale commented-out loop condition. */
    uint32_t limit = (uint32_t)us;
    while (1) {
        uint32_t delta = us_ticker_read() - start;
        if (delta > limit) {
            break;
        }
    }
}
/*---------------------------------------------------------------------------*
 * Routine:  sys_arch_mbox_fetch
 *---------------------------------------------------------------------------*
 * Description:
 *      Blocks the thread until a message arrives in the mailbox, but does
 *      not block the thread longer than "timeout" milliseconds (similar to
 *      the sys_arch_sem_wait() function).  The "msg" argument is a result
 *      parameter that is set by the function (i.e., by doing "*msg = ptr").
 *      The "msg" parameter may be NULL to indicate that the message should
 *      be dropped.
 * Inputs:
 *      sys_mbox_t *mbox  -- Handle of mailbox
 *      void **msg        -- Pointer to pointer to msg received (may be NULL)
 *      u32_t timeout     -- Number of milliseconds until timeout (0 = forever)
 * Outputs:
 *      u32_t             -- SYS_ARCH_TIMEOUT if timeout, else number of
 *                           milliseconds until received.
 *---------------------------------------------------------------------------*/
u32_t sys_arch_mbox_fetch(sys_mbox_t *mbox, void **msg, u32_t timeout) {
    u32_t start = us_ticker_read();

    osEvent event = osMessageGet(mbox->id, (timeout != 0)?(timeout):(osWaitForever));
    if (event.status != osEventMessage)
        return SYS_ARCH_TIMEOUT;

    /* Bug fix: the contract allows msg == NULL (message dropped), but the
     * previous code dereferenced it unconditionally. */
    if (msg != NULL)
        *msg = (void *)event.value.v;

    return (us_ticker_read() - start) / 1000;
}
/*---------------------------------------------------------------------------*
 * Routine:  sys_arch_sem_wait
 *---------------------------------------------------------------------------*
 * Description:
 *      Blocks the thread while waiting for the semaphore to be signaled.
 *      If the "timeout" argument is non-zero, the thread is blocked for at
 *      most that many milliseconds; a timeout of zero means wait forever.
 * Inputs:
 *      sys_sem_t *sem    -- Semaphore to wait on
 *      u32_t timeout     -- Number of milliseconds until timeout (0 = forever)
 * Outputs:
 *      u32_t             -- Milliseconds spent waiting, or SYS_ARCH_TIMEOUT.
 *---------------------------------------------------------------------------*/
u32_t sys_arch_sem_wait(sys_sem_t *sem, u32_t timeout) {
    u32_t start = us_ticker_read();

    if (timeout != 0) {
        if (twai_sem(sem->id, timeout) == E_TMOUT) {
            return SYS_ARCH_TIMEOUT;
        }
    } else {
        /* timeout == 0 means wait forever.  Report unexpected kernel errors
         * instead of silently ignoring them, mirroring the error handling
         * in sys_arch_mbox_fetch() in this file. */
        ER ercd = wai_sem(sem->id);
        if (ercd != E_OK) {
            error("wai_sem returned with error= %d\n", ercd);
        }
    }

    return (us_ticker_read() - start) / 1000;
}
/**
 * uint32_t suli_pin_pulse_in(IO_T *, what_state, timeout)
 *
 * Waits for the pin to enter `state`, then measures how long it stays
 * there (microseconds).  A non-zero `timeout` bounds the TOTAL time spent
 * in this call; 0 is returned on timeout.
 */
uint32_t suli_pin_pulse_in(IO_T *pio, int state, uint32_t timeout)
{
    //TODO: more efficient implementation
    uint32_t begin = us_ticker_read();

    // Phase 1: wait for the pulse to start (pin reaches requested state).
    while (gpio_read(pio) != state) {
        if (timeout > 0 && (us_ticker_read() - begin) > timeout) {
            return 0;
        }
    }

    // Phase 2: pulse started -- time how long the pin holds that state.
    uint32_t pulse_begin = us_ticker_read();
    while (gpio_read(pio) == state) {
        if (timeout > 0 && (us_ticker_read() - begin) > timeout) {
            return 0;
        }
    }

    // Measurement overhead of the loop itself is not compensated.
    return us_ticker_read() - pulse_begin;
}
// Advances the Wicentric (WSF) software timers by the wall-clock time that
// has elapsed since the previous call, then runs the WSF OS dispatcher.
void MaximBLE::callDispatcher(void)
{
    // Initialized exactly once, on the first call (function-local static).
    static uint32_t lastTimeUs = us_ticker_read();
    uint32_t currTimeUs, deltaTimeMs;

    // Update the current Wicentric time
    currTimeUs = us_ticker_read();
    deltaTimeMs = (currTimeUs - lastTimeUs) / 1000;
    if (deltaTimeMs > 0) {
        WsfTimerUpdate(deltaTimeMs);
        // Advance the reference by whole milliseconds only, so the
        // sub-millisecond remainder carries into the next call instead of
        // being lost to truncation.
        lastTimeUs += deltaTimeMs * 1000;
    }

    wsfOsDispatcher();
}
// Gap scan callback: latches onto the first heart-rate advertiser seen
// (identified by the first two bytes of its address), then extracts the
// heart rate from subsequent advertising packets of that device.
void advertisementCallback(const Gap::AdvertisementCallbackParams_t *params) {
    //no device saved, find one
    if (peer0 == 0 && peer1 == 0){
        //note, Polar only advertises these right after power on
        if(isHeartrate(params->advertisingData, params->advertisingDataLen)){
            // Only the first two address bytes are stored/compared --
            // NOTE(review): assumes these suffice to disambiguate peers.
            peer0 = params->peerAddr[0];
            peer1 = params->peerAddr[1];
        }
        return;
    }
    //if this is our device, get its heart rate from advertising packet
    if (params->peerAddr[0] == peer0 && params->peerAddr[1] == peer1) {
        // Record when we last heard from the tracked device.
        lastAdvertisement = us_ticker_read();
        uint8_t heartRate = getHeartRate(params->advertisingData, params->advertisingDataLen);
#ifdef LED
        if(heartRate){
//            printf("hr: %d\r\n", heartRate);
//            printf("offtime: %d\r\n", offTime);
            // Map heart rate to the LED off-period: higher rate -> shorter
            // off time, with onTime subtracted from the 1500 budget.
            offTime = (-8.33f * (float)heartRate) + (1500.0f - (float)onTime );
        }
#endif
    }
}
// ANSI clock(): processor time in CLOCKS_PER_SEC units, derived from the
// free-running microsecond ticker.  The mutex serializes ticker access.
clock_t clock() {
    _mutex->lock();
    clock_t ticks = us_ticker_read();
    ticks /= 1000000 / CLOCKS_PER_SEC; // microseconds -> processor ticks
    _mutex->unlock();
    return ticks;
}
// Rising-edge ISR: records the interval between successive rising edges
// and counts the edges seen.
void Spindle::on_pin_rise()
{
    const uint32_t now = us_ticker_read();
    last_time = now - last_edge;  // microseconds since the previous edge
    last_edge = now;
    ++irq_count;
}
void irq_handler(void) { us_ticker_clear_interrupt(); /* Go through all the pending TimerEvents */ while (1) { if (head == NULL) { // There are no more TimerEvents left, so disable matches. us_ticker_disable_interrupt(); return; } if ((int)(head->timestamp - us_ticker_read()) <= 0) { // This event was in the past: // point to the following one and execute its handler ticker_event_t *p = head; head = head->next; event_handler(p->id); // NOTE: the handler can set new events } else { // This event and the following ones in the list are in the future: // set it as next interrupt and return us_ticker_set_interrupt(head->timestamp); return; } } }
/* Arms TPM2 to interrupt at the given absolute microsecond timestamp.
 * The TPM counter is 16 bits, so the 32-bit delay is split into full
 * 0x10000-tick periods (us_ticker_int_counter) plus a remainder that is
 * loaded last (us_ticker_int_remainder). */
void us_ticker_set_interrupt(timestamp_t timestamp)
{
    /* We get here absolute interrupt time which takes into account counter
     * overflow.  Since we use an additional count-down timer to generate the
     * interrupt we need to calculate the load value from the time-stamp. */
    const uint32_t now_ticks = us_ticker_read();
    uint32_t delta_ticks =
        timestamp >= now_ticks ? timestamp - now_ticks :
        /* Wrapped target: distance on a 32-bit counter is
         * timestamp + 2^32 - now.  (Bug fix: the previous constant
         * 0xFFFFFFFF was one tick short.) */
        (uint32_t)((uint64_t)timestamp + 0x100000000ULL - now_ticks);

    if (delta_ticks == 0) {
        /* The requested delay is less than the minimum resolution of this counter. */
        delta_ticks = 1;
    }

    us_ticker_int_counter   = (uint32_t)(delta_ticks >> 16);
    us_ticker_int_remainder = (uint16_t)(0xFFFF & delta_ticks);

    TPM_StopTimer(TPM2);
    TPM2->CNT = 0;

    if (us_ticker_int_counter > 0) {
        TPM2->MOD = 0xFFFF;
        us_ticker_int_counter--;
    } else {
        TPM2->MOD = us_ticker_int_remainder;
        us_ticker_int_remainder = 0;
    }

    /* Clear the count and set match value */
    TPM_ClearStatusFlags(TPM2, kTPM_TimeOverflowFlag);
    TPM_EnableInterrupts(TPM2, kTPM_TimeOverflowInterruptEnable);
    TPM_StartTimer(TPM2, kTPM_SystemClock);
}
// ANSI clock(): processor time in CLOCKS_PER_SEC units, derived from the
// microsecond ticker.  The critical section keeps the read consistent
// with respect to interrupts.
clock_t clock()
{
    core_util_critical_section_enter();
    clock_t ticks = us_ticker_read();
    ticks /= 1000000 / CLOCKS_PER_SEC; // microseconds -> processor ticks
    core_util_critical_section_exit();
    return ticks;
}
/* Ticker interrupt dispatcher: fires the handler for every pending event
 * whose timestamp has passed, then either arms the interrupt for the next
 * future event or disables matching when the list is empty. */
void us_ticker_irq_handler(void)
{
    us_ticker_clear_interrupt();

    /* Go through all the pending TimerEvents */
    while (1) {
        if (head == NULL) {
            // There are no more TimerEvents left, so disable matches.
            us_ticker_disable_interrupt();
            return;
        }

        /* Signed wrap-safe comparison: <= 0 means the event is due. */
        if ((int)(head->timestamp - us_ticker_read()) <= 0) {
            // This event was in the past:
            // point to the following one and execute its handler
            ticker_event_t *p = head;
            head = head->next;
            if (event_handler != NULL) {
                event_handler(p->id); // NOTE: the handler can set new events
            }
            /* Note: We continue back to examining the head because calling the
             * event handler may have altered the chain of pending events. */
        } else {
            // This event and the following ones in the list are in the future:
            // set it as next interrupt and return
            us_ticker_set_interrupt(head->timestamp);
            return;
        }
    }
}
/**
 * Implementation of jerry_port_get_current_time.
 *
 * Combines the seconds-resolution RTC (time()) with the microsecond ticker
 * to produce a millisecond clock, correcting for the ~33 ms/overflow drift
 * of the ticker-based milliseconds component.
 *
 * @return current timer's counter value in milliseconds
 */
double jerry_port_get_current_time (void)
{
  static uint64_t last_tick = 0;
  static time_t last_time = 0;
  static uint32_t skew = 0;

  uint64_t curr_tick = us_ticker_read (); /* The value is in microseconds. */
  time_t curr_time = time (NULL);         /* The value is in seconds. */

  /* Bug fix: convert to double BEFORE multiplying.  `curr_time * 1000` was
   * evaluated in time_t arithmetic and overflows a 32-bit time_t for any
   * modern epoch value. */
  double result = (double) curr_time * 1000;

  /* The us_ticker_read () has an overflow for each UINT_MAX microseconds
   * (~71 mins). For each overflow event the ticker-based clock is about 33
   * milliseconds fast. Without a timer thread the milliseconds part of the
   * time can be corrected if the difference of two get_current_time calls
   * are within the mentioned 71 mins. Above that interval we can assume
   * that the milliseconds part of the time is negligibe. */
  if (curr_time - last_time > (time_t) (((uint32_t) -1) / 1000000))
  {
    skew = 0;
  }
  else if (last_tick > curr_tick)
  {
    skew = (skew + 33) % 1000;
  }
  result += (curr_tick / 1000 - skew) % 1000;

  last_tick = curr_tick;
  last_time = curr_time;
  return result;
} /* jerry_port_get_current_time */
// Schedules a ticker interrupt at the given microsecond timestamp.  The
// hardware compare register used here is effectively 16 bits, so far-away
// deadlines are tracked in the 'timeStamp' software variable (upper 16 bits
// of the remaining time) and only near deadlines arm COMPARE0 directly.
void us_ticker_set_interrupt(unsigned int timestamp)
{
    if (!us_ticker_inited) {
        us_ticker_init();
    }

    // Latch the current counter value via capture task 0.
    US_TICKER_TIMER->TASKS_CAPTURE[0] = 1;

    // Upper 16 bits of the time remaining until 'timestamp'.
    // NOTE(review): us_ticker_read() is sampled again in the else-branch
    // below, so the two reads can differ by a few microseconds -- confirm
    // this jitter is acceptable for the scheduling granularity.
    uint16_t tsUpper16 = (uint16_t)((timestamp - us_ticker_read()) >> 16);
    if (tsUpper16 > 0) {
        // Deadline is more than 0xFFFF us away: remember the pending
        // 65536-us "chunk" count, keeping the sooner of any existing
        // request and this one.
        if ((timeStamp == 0) || (timeStamp > tsUpper16)) {
            timeStamp = tsUpper16;
        }
    } else {
        // Deadline fits in 16 bits: enable COMPARE0 and arm it relative
        // to the current count.
        US_TICKER_TIMER->INTENSET |= TIMER_INTENSET_COMPARE0_Set << TIMER_INTENSET_COMPARE0_Pos;
        US_TICKER_TIMER->CC[0] += timestamp - us_ticker_read();
    }
}
// Arms DUALTIMER2 as a countdown to fire at 'timestamp' (microseconds,
// same epoch as us_ticker_read()).  The unsigned subtraction makes the
// delta wrap-safe on the 32-bit tick counter.  The register sequence is:
// disable counting, set the load value, enable the interrupt, re-enable
// counting -- preserve this order when modifying.
void us_ticker_set_interrupt(timestamp_t timestamp)
{
    uint32_t delta = timestamp - us_ticker_read();
    US_TICKER_INTERRUPT->TimerControl &= ~CMSDK_DUALTIMER2_CTRL_EN_Msk;   // disable TIMER2
    US_TICKER_INTERRUPT->TimerLoad = delta;                               // Set TIMER2 load value
    US_TICKER_INTERRUPT->TimerControl |= CMSDK_DUALTIMER2_CTRL_INTEN_Msk; // enable TIMER2 interrupt
    US_TICKER_INTERRUPT->TimerControl |= CMSDK_DUALTIMER2_CTRL_EN_Msk;    // enable TIMER2 counter
    NVIC_EnableIRQ(US_TICKER_TIMER_IRQn);
}
// Decides whether the step ticker may start fetching blocks from the queue.
// An idle system is allowed to pre-load the queue for queue_delay_time_ms
// before fetching begins, so the first few blocks run smoothly.
void Conveyor::check_queue(bool force)
{
    // Initialized exactly once, on the first call (function-local static).
    static uint32_t last_time_check = us_ticker_read();

    if(queue.is_empty()) {
        allow_fetch = false;
        last_time_check = us_ticker_read(); // reset timeout
        return;
    }

    // if we have been waiting for more than the required waiting time and the queue is not empty, or the queue is full, then allow stepticker to get the tail
    // we do this to allow an idle system to pre load the queue a bit so the first few blocks run smoothly.
    // (queue_delay_time_ms is milliseconds; the ticker is microseconds, hence * 1000.)
    if(force || queue.is_full() || (us_ticker_read() - last_time_check) >= (queue_delay_time_ms * 1000)) {
        last_time_check = us_ticker_read(); // reset timeout
        if(!flush) allow_fetch = true;
        return;
    }
}
// Starts BLE scanning for advertisements and schedules the periodic
// timeout check (plus the LED toggler when LED is defined).
void startScan() {
//    printf("start scan");
    // Seed the "last heard" timestamp so the timeout does not fire
    // immediately.  NOTE(review): assumes timeout() compares against
    // lastAdvertisement -- confirm in its definition.
    lastAdvertisement = us_ticker_read();
#ifdef LED
    ledCallback = minar::Scheduler::postCallback(toggle).getHandle();
#endif
    // Run timeout() every ADVERTISEMENT_TIMEOUT_SECONDS.
    scanCallback = minar::Scheduler::postCallback(timeout).period(minar::milliseconds(ADVERTISEMENT_TIMEOUT_SECONDS * 1000)).getHandle();
    BLE::Instance().gap().startScan(advertisementCallback);
}
// Arms US_TICKER_TIMER to fire at the given absolute microsecond
// timestamp.  Deadlines already in the past are clamped to a one-tick
// countdown so the interrupt still fires as soon as possible.
void us_ticker_set_interrupt(timestamp_t timestamp)
{
    int remaining = (int)(timestamp - us_ticker_read());
    if (remaining <= 0) {
        remaining = 1; // event already due
    }
    timer_start(US_TICKER_TIMER, remaining, FALSE);
    NVIC_EnableIRQ(TIMER1_IRQn);
}
/*---------------------------------------------------------------------------*
 * Routine:  sys_arch_mbox_fetch
 *---------------------------------------------------------------------------*
 * Description:
 *      Blocks the thread until a message arrives in the mailbox, but does
 *      not block the thread longer than "timeout" milliseconds (similar to
 *      the sys_arch_sem_wait() function).  The "msg" argument is a result
 *      parameter that is set by the function (i.e., by doing "*msg = ptr").
 *      The "msg" parameter may be NULL to indicate that the message should
 *      be dropped.
 * Inputs:
 *      sys_mbox_t *mbox  -- Handle of mailbox
 *      void **msg        -- Pointer to pointer to msg received (may be NULL)
 *      u32_t timeout     -- Number of milliseconds until timeout (0 = forever)
 * Outputs:
 *      u32_t             -- SYS_ARCH_TIMEOUT if timeout, else number of
 *                           milliseconds until received.
 *---------------------------------------------------------------------------*/
u32_t sys_arch_mbox_fetch(sys_mbox_t *mbox, void **msg, u32_t timeout) {
    u32_t start = us_ticker_read();
    intptr_t data;

    if (timeout != 0) {
        if (trcv_dtq(mbox->id, &data, timeout) == E_TMOUT) {
            return SYS_ARCH_TIMEOUT;
        }
    } else { // if timeout == 0 then wait forever.
        ER ercd = rcv_dtq(mbox->id, &data);
        if (ercd != E_OK) {
            error("rcv_dtq returned with error= %d\n", ercd);
        }
    }

    /* Bug fix: "msg" may be NULL (message dropped).  Previously the caller's
     * pointer was passed straight to the kernel, which would have written
     * the received data through a NULL pointer. */
    if (msg != NULL) {
        *msg = (void *)data;
    }

    return (us_ticker_read() - start) / 1000;
}
/* Fills pConversionData with PRNG_KEY_SIZE_256 bits of pseudo-random data.
 *
 * NOTE(review): despite the "trng" name, this seeds the hardware PRNG with
 * us_ticker_read() -- a low-entropy, partly predictable time value.  That
 * is a weak seed for anything security-sensitive; confirm the intended use
 * or switch to a true entropy source if available.
 */
static void trng_get(unsigned char *pConversionData)
{
    uint32_t *p32ConversionData;

    // PRNG_Read() fills 32-bit words, so reinterpret the output buffer.
    p32ConversionData = (uint32_t *)pConversionData;

    // Open the PRNG at 256-bit key size; the second argument (1) is
    // presumably the seed-reload flag -- confirm against the BSP docs.
    PRNG_Open(PRNG_KEY_SIZE_256, 1, us_ticker_read());
    crypto_prng_prestart();
    PRNG_Start();
    crypto_prng_wait();

    PRNG_Read(p32ConversionData);
}
/* Arms PIT channel 3 to interrupt after the time remaining until the given
 * absolute microsecond timestamp.  Channel 2 is stopped/restarted together
 * with channel 3 (chained configuration). */
void us_ticker_set_interrupt(timestamp_t timestamp)
{
    uint32_t now_us, delta_us;

    now_us = us_ticker_read();
    /* Wrap-safe distance on the 32-bit microsecond counter: when the target
     * has wrapped past 'now', the distance is timestamp + 2^32 - now.
     * (Bug fix: the previous constant 0xFFFFFFFF was one microsecond short.) */
    delta_us = timestamp >= now_us ? timestamp - now_us
                                   : (uint32_t)((uint64_t)timestamp + 0x100000000ULL - now_us);

    PIT_StopTimer(PIT, kPIT_Chnl_3);
    PIT_StopTimer(PIT, kPIT_Chnl_2);
    PIT_SetTimerPeriod(PIT, kPIT_Chnl_3, (uint32_t)delta_us);
    PIT_EnableInterrupts(PIT, kPIT_Chnl_3, kPIT_TimerInterruptEnable);
    PIT_StartTimer(PIT, kPIT_Chnl_3);
    PIT_StartTimer(PIT, kPIT_Chnl_2);
}
// This is used to call back into the Kernel using a WellKnownMethod static void MbedInterruptHandler(uint32_t id) { if (s_TimerCallback != NULL) { LLOS_MbedTimer *pCtx = (LLOS_MbedTimer*)id; if (pCtx != NULL) { uint64_t ticks = us_ticker_read(); s_TimerCallback(pCtx->Context, ticks); } } }
// lwIP system-architecture init: touches the microsecond ticker (which
// initializes it) and creates the binary semaphore used as lwip_sys_mutex.
void sys_init(void) {
    T_CSEM csem;

    us_ticker_read(); // Init sys tick

//    lwip_sys_mutex = osMutexCreate(osMutex(lwip_sys_mutex));
    // create a semaphore because lwip_sys_mutex is used for mutual exclusion
    // NOTE(review): sematr is an attribute word; assigning NULL works but
    // 0/TA_NULL would be the conventional value -- confirm.
    csem.sematr = NULL;
    csem.isemcnt = 1;  // initial count 1 (i.e. "unlocked")
    csem.maxsem = 1;   // max count 1 => binary semaphore
    lwip_sys_mutex = acre_sem(&csem);
//    if (lwip_sys_mutex == NULL)
    // acre_sem returns a negative value on failure, a semaphore ID otherwise.
    if (lwip_sys_mutex < 0) {
        error("sys_init error\n");
    }
}
void servo_handler(void) { while (1) { unsigned int max_count=0; for (int i=0; i<ServoCount; i++) { if ( servos[i].Pin.isActive == true ) { if ( servos[i].us > 0 ) { digitalWrite(servos[i].Pin.nbr, HIGH); if ( servos[i].us > max_count ) max_count = servos[i].us; } } } uint32_t start=0, current=0; uint32_t spend=0; start = us_ticker_read(); uint8_t loop = true; while ( loop ) { current = us_ticker_read(); if ( current >= start ) spend = current- start; else spend = 0xFFFFFFFF - start + current; for (int i=0; i<ServoCount; i++) { if ( servos[i].Pin.isActive == true ) { if ( spend >= servos[i].us - delta_time ) { digitalWrite(servos[i].Pin.nbr, LOW); loop = false; } } } } delay( (20000-2500*ServoCount)/1000 ); } }
// Fires a synchronization channel: marks it fired, applies each configured
// send action to its GPIO pin, invokes the channel's callback, and wakes
// the dispatcher thread.
void DispatcherPrivate::fire_channel(SYNCHRONIZATION_CHANNEL* channel)
{
    channel->data->state->fired = true;

    for (int i = 0; i < MAX_SEND_DATA; ++i) {
        SendData* sd = &channel->data->state->send[i];
        if (sd->pin == NC)
            continue; // unused slot

        switch (sd->mode) {
            case SendChannelModeNone:
                break;
            case SendChannelModeSet:
                gpio_write(&sd->gpio, 1);
                break;
            case SendChannelModeToggle:
                // Flip between 0 and 1 based on the last written level.
                sd->last_write = 1 - sd->last_write;
                gpio_write(&sd->gpio, sd->last_write);
                break;
            case SendChannelModeReset:
                gpio_write(&sd->gpio, 0);
                break;
            case SendChannelModePulseUp:
                // Raise now; insert() presumably schedules the opposite
                // edge at now + pulse_length_us -- confirm in SendData.
                gpio_write(&sd->gpio, 1);
                sd->insert(us_ticker_read() + sd->pulse_length_us);
                break;
            case SendChannelModePulseDown:
                // Drop now; see note on SendChannelModePulseUp.
                gpio_write(&sd->gpio, 0);
                sd->insert(us_ticker_read() + sd->pulse_length_us);
                break;
        }
    }

    if (channel->fired != NULL)
        channel->fired(channel);

    // Wake the dispatcher thread to pick up the state change.
    osSignalSet(_dispid, SIGNAL_CHANGED);
}
int GPSSensorUBX::ReceiveFrame(uint32_t timeOutUs){ bool reading = false; int i = 0; int j = 0; int jMax = 10; uint32_t deadLine = us_ticker_read() + timeOutUs; while(1) { //DBG("PIPI"); if(gps.readable()) { j = 0; //DBG("Rx"); recvBuff[i++] = gps.getc(); reading = true; } else { // DBG("CACA"); // Was reading and become unreadable if(reading) { if(j<jMax) { j++; Thread::wait(4); } else { DBG_MEMDUMP("Received", (const char*) recvBuff, i); return i; } } else { if(us_ticker_read()>deadLine) { DBG("TimedOut"); return 0; } else { Thread::wait(10); //DBG("ZIZI"); } } } } }
//TIMER1 is used for Timestamped interrupts (Ticker(), Timeout()) void us_ticker_set_interrupt(timestamp_t timestamp) { // MRT source clock is SystemCoreClock (30MHz) and MRT is a 31-bit countdown timer // Force load interval value (Bit 0-30 is interval value, Bit 31 is Force Load bit) // Note: The MRT has less counter headroom available than the typical mbed 32bit timer @ 1 MHz. // The calculated counter interval until the next timestamp will be truncated and an // 'early' interrupt will be generated in case the max required count interval exceeds // the available 31 bits space. However, the mbed us_ticker interrupt handler will // check current time against the next scheduled timestamp and simply re-issue the // same interrupt again when needed. The calculated counter interval will now be smaller. LPC_MRT->INTVAL1 = (((timestamp - us_ticker_read()) * MRT_Clock_MHz) | 0x80000000UL); // Enable interrupt LPC_MRT->CTRL1 |= 1; }
/* Arms US_TICKER_TIMER1 to interrupt at the given absolute microsecond
 * timestamp.  Timestamps already in the past are serviced immediately by
 * invoking the irq handler directly. */
void us_ticker_set_interrupt(timestamp_t timestamp)
{
    if (!us_ticker_inited)
        us_ticker_init();

    int delta = (int)(timestamp - us_ticker_read());
    if (delta <= 0) {
        // This event was in the past: service it right away.
        us_ticker_irq_handler();
        return;
    }

    /* The timer counts 25 ticks per microsecond.  (Cleanup: the original
     * also computed delta*25 into a never-used local 'timer_value'.) */
    US_TICKER_TIMER1->TimerControl = 0x0;     // disable timer
    US_TICKER_TIMER1->TimerControl = 0x62;    // enable interrupt, 32-bit counter, periodic mode
    US_TICKER_TIMER1->TimerLoad = delta * 25; // initialise the timer value
    US_TICKER_TIMER1->TimerControl |= 0x80;   // enable timer
}
// Schedules the high-resolution ticker interrupt at 'timestamp'
// (microseconds).  Past-due events are flagged as a pending IRQ rather
// than processed inline, avoiding recursive dispatch under heavy load.
void us_ticker_set_interrupt(timestamp_t timestamp)
{
    TIMER_Stop((TIMER_T *) NU_MODBASE(timer1hires_modinit.modname));

    int delta = (int) (timestamp - us_ticker_read());
    if (delta <= 0) {
        cd_major_minor_us = cd_minor_us = 0;
        /**
         * This event was in the past. Set the interrupt as pending, but don't process it here.
         * This prevents a recurive loop under heavy load which can lead to a stack overflow.
         */
        NVIC_SetPendingIRQ(timer1hires_modinit.irq_n);
        return;
    }

    // Future event: arm the countdown in microsecond units.
    cd_major_minor_us = delta * US_PER_TICK;
    us_ticker_arm_cd();
}