/*
 * Suspend the calling thread for (at least) `time` ticks.
 *
 * Converts the relative delay into an absolute wake-up instant, parks the
 * current thread on that deadline and invokes the scheduler to elect a
 * replacement. Always reports success.
 *
 * @param time  relative sleep duration, in clock ticks
 * @return POK_ERRNO_OK
 */
pok_ret_t pok_thread_sleep (uint32_t time)
{
   /* Absolute deadline: current tick count plus the requested delay. */
   uint64_t wakeup_instant = POK_GETTICK() + time;

   pok_sched_lock_current_thread_timed (wakeup_instant);
   pok_sched ();   /* yield the CPU until the deadline expires */

   return POK_ERRNO_OK;
}
/*
 * Register an asynchronous timed event for thread `thread_id` in the current
 * partition: take a free event descriptor from the empty-events pool, fill it
 * (absolute timestamp = now + `time`, runnable-mask bit for the thread) and
 * push it onto the head of the temporary queue.
 *
 * Special case: if the thread already owns a DELAYED_START timeout event,
 * only its timestamp is overwritten (DELAYED_START events created before the
 * switch to NORMAL mode are re-armed rather than reallocated).
 *
 * BUGFIX: the debug printf for the "Actual queue" header contained a raw
 * newline inside the string literal (invalid C); the literal is now closed
 * on one line.
 *
 * @param thread_id  global index of the target thread in pok_threads[]
 * @param time       relative delay in ticks (also stored in ->timer)
 * @param type       event kind (pok_event_type_t)
 */
void pok_sched_set_asynch_event(uint32_t thread_id, uint64_t time, pok_event_type_t type)
{
   if ((pok_threads[thread_id].timeout != POK_NULL) &&
       (pok_threads[thread_id].timeout->type == POK_EVENT_DELAYED_START))
   {
      /* Overwrite the previously created DELAYED_START event (NORMAL
       * partition mode): just update its expiry instant. */
      pok_threads[thread_id].timeout->timestamp = time;
#ifdef POK_NEEDS_DEBUG_O1
      printf("[DEBUG_O1]\t UPDATED ASYNCH EVENT: thread %d to be activated at time ", thread_id);
      print_long(pok_threads[thread_id].timeout->timestamp);
      printf("\n");
#endif
      return;
   }

   uint64_t now = POK_GETTICK();

   /* Take a free descriptor from the head of the empty-events pool.
    * NOTE(review): no check that the pool is non-empty before dereferencing;
    * presumably the configuration guarantees one descriptor per pending
    * event — confirm against the pool sizing. */
   pok_sched_asynch_event_t* new_event = POK_CURRENT_PARTITION.head_asynch_empty;

   /* Runnable-mask bit of the thread, relative to the partition's first thread. */
   uint32_t the_mask = (1 << (pok_threads[thread_id].pos - pok_partitions[pok_threads[thread_id].partition].thread_index_low));

   new_event->pos       = thread_id;
   new_event->timer     = time;        /* relative delay, kept for re-arming */
   new_event->timestamp = now + time;  /* absolute expiry instant */
   new_event->mask      = the_mask;
   new_event->type      = type;

   /* Unlink the descriptor from the empty pool. */
   if (new_event->next != POK_NULL)
      new_event->next->previous = POK_NULL;
   POK_CURRENT_PARTITION.head_asynch_empty = new_event->next;

   /* Add to the temporary queue (insert at head). */
   new_event->next = POK_CURRENT_PARTITION.head_asynch_temporary;
   if (new_event->next != POK_NULL)
      new_event->next->previous = new_event;
   POK_CURRENT_PARTITION.head_asynch_temporary = new_event;

   /* Remember the pending timeout on the thread itself. */
   pok_threads[thread_id].timeout = new_event;

#ifdef POK_NEEDS_DEBUG_O1
   if (POK_CURRENT_PARTITION.head_asynch_empty != POK_NULL ||
       POK_CURRENT_PARTITION.head_asynch_temporary != POK_NULL ||
       POK_CURRENT_PARTITION.head_asynch_queue != POK_NULL)
   {
      printf("**********************************************************************\n");
      printf("DEBUG_O1::CREATED ASYNCH EVENT: thread %d to be activated at time ", thread_id);
      print_long(new_event->timestamp);
      printf("\n");
      printf("** Empty queue: ");     print_queue(POK_CURRENT_PARTITION.head_asynch_empty);
      printf("** Temporary queue: "); print_queue(POK_CURRENT_PARTITION.head_asynch_temporary);
      printf("** Actual queue: ");    print_queue(POK_CURRENT_PARTITION.head_asynch_queue);
      printf("**********************************************************************\n");
   }
#endif
}
/*
 * Report the status of sampling port `id` into *status.
 *
 * Validates that the index is in range, the calling partition owns the port,
 * the port is created (ready), it is a sampling port, and it belongs to the
 * current partition; then fills in size, direction, refresh period and the
 * validity flag (a sample older than the refresh period is stale).
 *
 * @param id      port index (valid range: 0 .. POK_CONFIG_NB_PORTS-1)
 * @param status  out parameter; written only on the success path
 * @return POK_ERRNO_OK, or POK_ERRNO_EINVAL / POK_ERRNO_PORT on a failed check
 */
pok_ret_t pok_port_sampling_status (const pok_port_id_t id, pok_port_sampling_status_t* status)
{
   /* BUGFIX: was `id > POK_CONFIG_NB_PORTS`, which let id == NB_PORTS
    * through and read pok_ports[] one past the end. */
   if (id >= POK_CONFIG_NB_PORTS)
   {
      return POK_ERRNO_EINVAL;
   }

   if (! pok_own_port (POK_SCHED_CURRENT_PARTITION, id))
   {
      return POK_ERRNO_PORT;
   }

   if (pok_ports[id].ready == FALSE)
   {
      return POK_ERRNO_EINVAL;
   }

   if (pok_ports[id].kind != POK_PORT_KIND_SAMPLING)
   {
      return POK_ERRNO_EINVAL;
   }

   if (pok_ports[id].partition != POK_SCHED_CURRENT_PARTITION)
   {
      return POK_ERRNO_EINVAL;
   }

   status->size      = pok_ports[id].size;
   status->direction = pok_ports[id].direction;
   status->refresh   = pok_ports[id].refresh;

   /* The last received sample is valid while its age is within the
    * configured refresh period. */
   if ((pok_ports[id].last_receive + pok_ports[id].refresh) < POK_GETTICK())
   {
      status->validity = FALSE;
   }
   else
   {
      status->validity = TRUE;
   }

   return POK_ERRNO_OK;
}
void pok_sched_service_asynch_events() { uint64_t now = POK_GETTICK(); pok_sched_asynch_event_t* current_asynch = POK_CURRENT_PARTITION.head_asynch_queue; while (current_asynch != POK_NULL && current_asynch->timestamp <= now) { POK_CURRENT_PARTITION.runnables |= current_asynch->mask; POK_CURRENT_PARTITION.head_asynch_queue = current_asynch->next; if (pok_threads[current_asynch->pos].timeout->type == POK_EVENT_DELAYED_START) { current_asynch->next = POK_CURRENT_PARTITION.head_asynch_temporary; //put the event back in the (head of) temporary queue if (POK_CURRENT_PARTITION.head_asynch_temporary != POK_NULL) POK_CURRENT_PARTITION.head_asynch_temporary->previous = current_asynch; POK_CURRENT_PARTITION.head_asynch_temporary = current_asynch; } else { current_asynch->next = POK_CURRENT_PARTITION.head_asynch_empty; //put the event in the (head of) empty queue if (POK_CURRENT_PARTITION.head_asynch_empty != POK_NULL) POK_CURRENT_PARTITION.head_asynch_empty->previous = current_asynch; POK_CURRENT_PARTITION.head_asynch_empty = current_asynch; pok_threads[current_asynch->pos].timeout = POK_NULL; current_asynch->timer =0; current_asynch->timestamp =0; current_asynch->mask =0; current_asynch->pos =0; } #ifdef POK_NEEDS_DEBUG_O1 if (POK_CURRENT_PARTITION.head_asynch_empty != POK_NULL || POK_CURRENT_PARTITION.head_asynch_temporary != POK_NULL || POK_CURRENT_PARTITION.head_asynch_queue != POK_NULL) { printf("**********************************************************************\n"); printf("DEBUG_O1::SERVICE ASYNCH EVENT: thread %d (to be activated at "); print_long(current_asynch->timestamp);printf(") has been activated at time ",current_asynch->pos);print_long(now);printf("\n"); printf("** Empty queue: ");print_queue(POK_CURRENT_PARTITION.head_asynch_empty); printf("** Temporary queue: ");print_queue(POK_CURRENT_PARTITION.head_asynch_temporary); printf("** Actual queue: ");print_queue(POK_CURRENT_PARTITION.head_asynch_queue); 
printf("**********************************************************************\n"); } #endif current_asynch = POK_CURRENT_PARTITION.head_asynch_queue; } }
/**
 * Scheduler entry point.
 *
 * Called immediately after the interval-timer interrupt; executed in
 * kernel space. Depending on the build:
 *  - O1 scheduler: elects the next partition and thread, programs the
 *    decrementer for the slot (reserving a POSTWRITE sub-slot when
 *    inter-partition port flushing is enabled) and switches partition.
 *  - default scheduler: switches partition only at the slot deadline,
 *    otherwise just re-elects a thread inside the current partition.
 */
void pok_sched() {
#if defined (POK_NEEDS_PORTS_SAMPLING) || defined (POK_NEEDS_PORTS_QUEUEING)
#ifdef POK_NEEDS_SCHED_O1
/* No cache enabling in Patmos */
#ifndef POK_ARCH_PATMOS
/* enable both caches */
pok_arch_cache_enable();
#endif
/// manage POSTWRITE: if this tick ends the compute part of the slot,
/// spend the remaining sub-slot flushing the partition's output ports.
if (next_subslot_postwrite) {
#ifdef POK_NEEDS_DEBUG_O1
printf("[DEBUG_O1]\t Executing POSTWRITE for slot %d for %u\n", pok_sched_current_slot, pok_postwrite_times[pok_sched_current_slot]);
#endif
/* Program the timer for the POSTWRITE sub-slot only. */
next_timer = pok_postwrite_times[pok_sched_current_slot] * time_inter;
pok_arch_set_decr(next_timer);
next_subslot_postwrite = FALSE;
pok_slot_write_flush (pok_current_partition);
return;
}
#endif
#endif /* (POK_NEEDS_PORTS_SAMPLING) || defined (POK_NEEDS_PORTS_QUEUEING) */
uint32_t elected_thread = 0;
uint64_t now = POK_GETTICK();
#ifdef POK_NEEDS_SCHED_O1
/* No cache disabling in Patmos */
#ifndef POK_ARCH_PATMOS
/* Invalidate both caches before entering the next partition. */
pok_arch_cache_invalidate();
#endif
/* Partition elected for execution */
pok_partition_t* elected_partition = pok_elect_partition(now);
/* Thread elected for execution */
elected_thread = pok_elect_thread(elected_partition,now);
# if ((defined (POK_NEEDS_PORTS_SAMPLING) || defined (POK_NEEDS_PORTS_QUEUEING)))
/* Shorten the compute sub-slot by the POSTWRITE time reserved for this
 * slot; arm the POSTWRITE flag when such time is configured. */
next_timer = (pok_sched_slots[pok_sched_current_slot] - pok_postwrite_times[pok_sched_current_slot]) * time_inter;
if (pok_postwrite_times[pok_sched_current_slot] > 0) next_subslot_postwrite = TRUE;
#else
/* No port flushing: the whole slot is compute time. */
next_timer = pok_sched_slots[pok_sched_current_slot] * time_inter;
#endif
#ifdef POK_NEEDS_DEBUG_O1
printf("[DEBUG_O1]\t Elected partition %d and elected thread %d\n",elected_partition->partition_id, elected_thread);
printf("[DEBUG_O1]\t Setting next timer to %d and switching partition\n",next_timer);
printf("[DEBUG_O1]\t Partition switch\n");
#endif
pok_arch_set_decr(next_timer);
pok_sched_partition_switch (elected_thread);
#else /* ! POK_NEEDS_SCHED_O1 */
/* End of the currently executing partition slot */
if (pok_sched_next_deadline <= now) {
/* Select partition */
pok_partition_t* elected_partition = pok_elect_partition(now);
elected_thread = pok_elect_thread(elected_partition,now);
/* Switch partition (and context switch) */
pok_sched_partition_switch (elected_thread);
} else {
/* Still inside the current slot: only re-elect a thread. */
elected_thread = pok_elect_thread(&(pok_partitions[pok_current_partition]),now);
/* Context switch */
pok_sched_context_switch (elected_thread);
}
#endif /* POK_NEEDS_SCHED_O1 */
}
/**
 * Change the current mode of the partition. Possible modes are described
 * in core/partition.h.
 *
 * NORMAL: only accepted from the partition's main (init) thread and not
 * from IDLE mode; converts every DELAYED_START thread to its run state,
 * stops the main thread and reschedules.
 * STOPPED / INIT_WARM / INIT_COLD (error handling builds): only accepted
 * from the partition's error thread.
 *
 * @param pid   partition index in pok_partitions[]
 * @param mode  requested mode
 * @return POK_ERRNO_OK on success, POK_ERRNO_PARTITION_MODE when the
 *         requested transition is invalid or the caller is not allowed.
 */
pok_ret_t pok_partition_set_mode (const uint8_t pid, const pok_partition_mode_t mode)
{
   switch (mode)
   {
      case POK_PARTITION_MODE_NORMAL:
         /*
          * A partition that wants to go to the NORMAL mode must not be
          * in the IDLE mode.
          */
         if (pok_partitions[pid].mode == POK_PARTITION_MODE_IDLE)
         {
            return POK_ERRNO_PARTITION_MODE;
         }
         /* Only the partition's main (init) thread may request NORMAL. */
         if (POK_SCHED_CURRENT_THREAD != POK_CURRENT_PARTITION.thread_main)
         {
            return POK_ERRNO_PARTITION_MODE;
         }
         pok_partitions[pid].mode = mode; /* Here, we change the mode */
         pok_thread_t* thread;
         unsigned int i;
         /* Resolve the delayed start of every thread of the partition. */
         for (i = 0; i < pok_partitions[pid].nthreads; i++)
         {
            thread = &(pok_threads[POK_CURRENT_PARTITION.thread_index_low + i]);
            if ((long long)thread->period == -1)
            { /* -1 <==> ARINC INFINITE_TIME_VALUE: aperiodic thread */
               if(thread->state == POK_STATE_DELAYED_START)
               { /* delayed start: the delay is stored in wakeup_time */
                  if(!thread->wakeup_time)
                  {
                     thread->state = POK_STATE_RUNNABLE;
                  }
                  else
                  {
                     thread->state = POK_STATE_WAITING;
                  }
                  /* Turn the relative delay into an absolute instant. */
                  thread->wakeup_time += POK_GETTICK();
                  thread->end_time = thread->wakeup_time + thread->time_capacity;
               }
            }
            else
            {
               if(thread->state == POK_STATE_DELAYED_START)
               { /* periodic thread: first activation deferred to the next
                  * major frame plus the requested delay */
                  thread->next_activation = thread->wakeup_time + POK_CONFIG_SCHEDULING_MAJOR_FRAME + POK_CURRENT_PARTITION.activation;
                  thread->end_time = thread->next_activation + thread->time_capacity;
                  thread->state = POK_STATE_WAIT_NEXT_ACTIVATION;
               }
            }
         }
         pok_sched_stop_thread (pok_partitions[pid].thread_main);
         /* We stop the thread that requested this change. It is always
          * the init thread: once it calls this function the partition is
          * ready and the init thread no longer needs to execute. */
         pok_sched ();
         /*
          * Reschedule: the init thread is stopped, so the other threads
          * of the partition must now be executed.
          */
         break;
#ifdef POK_NEEDS_ERROR_HANDLING
      case POK_PARTITION_MODE_STOPPED:
         /*
          * Only the error thread can stop the partition.
          */
         if ((POK_CURRENT_PARTITION.thread_error == 0 ) || (POK_SCHED_CURRENT_THREAD != POK_CURRENT_PARTITION.thread_error))
         {
            return POK_ERRNO_PARTITION_MODE;
         }
         pok_partitions[pid].mode = mode; /* Here, we change the mode */
         pok_sched ();
         break;
      case POK_PARTITION_MODE_INIT_WARM:
      case POK_PARTITION_MODE_INIT_COLD:
         /* A COLD-init partition cannot be downgraded to WARM init. */
         if (pok_partitions[pid].mode == POK_PARTITION_MODE_INIT_COLD && mode == POK_PARTITION_MODE_INIT_WARM)
         {
            return POK_ERRNO_PARTITION_MODE;
         }
         /*
          * Check that only the error thread can restart the partition.
          */
         if ((POK_CURRENT_PARTITION.thread_error == 0 ) || (POK_SCHED_CURRENT_THREAD != POK_CURRENT_PARTITION.thread_error))
         {
            return POK_ERRNO_PARTITION_MODE;
         }
         /*
          * The partition falls back to the INIT_WARM mode when it was in
          * the NORMAL mode, so the previous mode was checked above.
          */
         pok_partitions[pid].mode = mode; /* Here, we change the mode */
         pok_partition_reinit (pid);
         pok_sched ();
         break;
#endif
      default:
         return POK_ERRNO_PARTITION_MODE;
         break;
   }
   return POK_ERRNO_OK;
}
pok_ret_t pok_port_queueing_send (const pok_port_id_t id, const void* data, const pok_port_size_t len, uint64_t timeout) { pok_lockobj_lockattr_t lockattr; (void) lockattr; pok_ret_t ret; if (id > POK_CONFIG_NB_PORTS) { return POK_ERRNO_EINVAL; } if (len <= 0) { return POK_ERRNO_SIZE; } if (data == NULL) { return POK_ERRNO_EINVAL; } if (! pok_own_port (POK_SCHED_CURRENT_PARTITION, id)) { return POK_ERRNO_PORT; } if (pok_ports[id].ready != TRUE) { return POK_ERRNO_PORT; } if (len > pok_ports[id].size) { return POK_ERRNO_SIZE; } if (pok_ports[id].direction != POK_PORT_DIRECTION_OUT) { return POK_ERRNO_DIRECTION; } if (pok_ports[id].partition != POK_SCHED_CURRENT_PARTITION) { return POK_ERRNO_EPERM; } if (pok_ports[id].kind != POK_PORT_KIND_QUEUEING) { return POK_ERRNO_KIND; } ret = pok_lockobj_lock (&pok_ports[id].lock, NULL); if (ret != POK_ERRNO_OK) { return ret; } if (timeout != 0) { timeout = timeout + POK_GETTICK(); } while (len > pok_port_available_size (id)) { if (timeout == 0) { pok_lockobj_unlock (&pok_ports[id].lock, NULL); return POK_ERRNO_FULL; } else { ret = pok_lockobj_eventwait (&pok_ports[id].lock, timeout); if (ret != POK_ERRNO_OK) { pok_lockobj_unlock (&pok_ports[id].lock, NULL); return (ret); } } } pok_port_write (id, data, len); pok_ports[id].must_be_flushed = TRUE; ret = pok_lockobj_unlock (&pok_ports[id].lock, NULL); if (ret != POK_ERRNO_OK) { return ret; } return POK_ERRNO_OK; }
/*
 * pok_lockobj_eventwait
 *
 * Block the calling thread on event lock object `obj` until it is signalled.
 * Returns immediately when the event is already up (and, in SPLIT builds,
 * when the caller's predecessor has executed). The `timeout` is an absolute
 * deadline checked only after wake-up — there is no timed wait yet.
 *
 * Returns POK_ERRNO_OK, POK_ERRNO_TIMEOUT, POK_ERRNO_LOCKOBJ_NOTREADY or
 * POK_ERRNO_EINVAL (wrong object kind).
 */
pok_ret_t pok_lockobj_eventwait (pok_lockobj_t* obj, const uint64_t timeout)
{
   pok_ret_t ret;

   //SPIN_LOCK (obj->eventspin);

   if (obj->initialized == FALSE)
   {
      return POK_ERRNO_LOCKOBJ_NOTREADY;
   }

   if (obj->kind != POK_LOCKOBJ_KIND_EVENT)
   {
      return POK_ERRNO_EINVAL;
   }

#ifndef POK_NEEDS_SCHED_O1_SPLIT
   // There is no need to wait: the event is already up.
   if (obj->is_locked == FALSE){
#ifdef DEBUG_LOCK
	   printf ("EVENT is already UP!");
#endif
	   return POK_ERRNO_OK;
   }
#else /* POK_NEEDS_SCHED_O1_SPLIT is defined */
   /* with the split solution we have to use the executed_predecessors bitmask to check if
    * the predecessor of the calling thread has executed; if the calling thread has not a
    * predecessor (i.e. it is not a successor) its bit in executed_predecessors is always 1
    * so the condition is always true (i.e. the condition is only "the event is UP")
    */
   if ( ( (POK_CURRENT_PARTITION.executed_predecessors & (1 << (POK_CURRENT_THREAD.pos - POK_CURRENT_PARTITION.thread_index_low))) & ~(obj->is_locked) ) != 0 )
   {
      return POK_ERRNO_OK;
   }
#endif

#ifndef POK_NEEDS_SCHED_O1_SPLIT
   /* Mark the calling thread as waiting on this event object. */
   obj->thread_state[POK_SCHED_CURRENT_THREAD] = LOCKOBJ_STATE_WAITEVENT;
#else /* POK_NEEDS_SCHED_O1_SPLIT is defined */
   /* current thread has to wait, set its bit in the bitmask */
   obj->waiting_threads |= (1 << (POK_CURRENT_THREAD.pos - POK_CURRENT_PARTITION.thread_index_low));
#endif

   /* NO SUPPORT FOR TIMEOUT RIGHT NOW: this blocks unconditionally and the
    * deadline is only evaluated after the thread is resumed. */
   pok_sched_lock_current_thread ();

   //SPIN_UNLOCK (obj->eventspin);

#ifndef POK_NEEDS_SCHED_O1_SPLIT
   /* Back from the wait: clear the waiting state. */
   obj->thread_state[POK_SCHED_CURRENT_THREAD] = LOCKOBJ_STATE_UNLOCK;
#else /* POK_NEEDS_SCHED_O1_SPLIT is defined*/
   /* reset the bit of the new current thread */
   obj->waiting_threads &= ~(1 << (POK_CURRENT_THREAD.pos - POK_CURRENT_PARTITION.thread_index_low));
   /* if the unlocked thread is a successor reset its bit in executed_predecessors */
   POK_CURRENT_PARTITION.executed_predecessors &= ~(1 << (POK_CURRENT_THREAD.pos - POK_CURRENT_PARTITION.thread_index_low));
#endif

   /* Here, we come back after we wait */
   if ((timeout != 0 ) && (POK_GETTICK() >= timeout))
   {
      ret = POK_ERRNO_TIMEOUT;
   }
   else
   {
      ret = POK_ERRNO_OK;
   }

   //SPIN_UNLOCK (obj->eventspin);

   return ret;
}
pok_ret_t pok_port_write (const uint8_t pid, const void *data, const pok_port_size_t size) { #ifdef POK_NEEDS_PORTS_QUEUEING pok_port_size_t tmp_size; pok_port_size_t tmp_size2; #endif switch (pok_ports[pid].kind) { #ifdef POK_NEEDS_PORTS_QUEUEING case POK_PORT_KIND_QUEUEING: if (pok_ports[pid].full == TRUE) { return POK_ERRNO_SIZE; } if (size > pok_ports[pid].size) { return POK_ERRNO_SIZE; } if ((pok_ports[pid].off_e + size) > pok_ports[pid].size) { tmp_size = pok_ports[pid].size - pok_ports[pid].off_e; memcpy (&pok_queue.data[pok_ports[pid].index + pok_ports[pid].off_e], data, tmp_size); tmp_size2 = size - tmp_size; memcpy (&pok_queue.data[pok_ports[pid].index], data + tmp_size, tmp_size2); } else { memcpy (&pok_queue.data[pok_ports[pid].index + pok_ports[pid].off_e], data, size); } pok_ports[pid].off_e = (pok_ports[pid].off_e + size) % pok_ports[pid].size; if (pok_ports[pid].off_e == pok_ports[pid].off_b) { pok_ports[pid].full = TRUE; } pok_ports[pid].empty = FALSE; return POK_ERRNO_OK; break; #endif #ifdef POK_NEEDS_PORTS_SAMPLING case POK_PORT_KIND_SAMPLING: if (size > pok_ports[pid].size) { return POK_ERRNO_SIZE; } memcpy (&pok_queue.data[pok_ports[pid].index + pok_ports[pid].off_e], data, size); pok_ports[pid].empty = FALSE; pok_ports[pid].last_receive = POK_GETTICK (); return POK_ERRNO_OK; break; #endif default: return POK_ERRNO_EINVAL; } }