/*
 * Initialize a pt2 provider structure.  Both extent_size and block_size
 * must be powers of two; the derived shift/mask/capacity values are
 * precomputed here so later lookups need only cheap bit operations.
 */
void pt2_construct(pt2 *p, PT_ID_T extent_size, PT_ID_T block_size, prov_alloc_func alloc_func, prov_free_func free_func)
{
    PT_ID_T extent_bits, block_bits, ptr_bits;

    /* both sizes must be powers of 2 */
    pt_assert((extent_size & (extent_size - 1)) == 0);
    pt_assert((block_size & (block_size - 1)) == 0);

    p->extent = 0;
    p->created = 0;
    p->extent_size = extent_size;
    p->block_size = block_size;

    extent_bits = pt_log2(extent_size);
    block_bits = pt_log2(block_size);
    ptr_bits = pt_log2(sizeof(void *));

    /* capacity and shift are expressed in log2 form */
    p->log2_capacity = extent_bits * 2 - block_bits - ptr_bits;
    p->shift = extent_bits - block_bits;
    p->mask = (((PT_ID_T)1) << p->shift) - ((PT_ID_T)1);
    p->alloc_func = alloc_func;
    p->free_func = free_func;
}
/*===========================================================================*
 *				rs_memctl_make_vm_instance		     *
 *===========================================================================*/
static int rs_memctl_make_vm_instance(struct vmproc *new_vm_vmp)
{
/* Turn new_vm_vmp into a second VM instance: flag it, pin its memory,
 * preallocate page tables for the full heap..data range in both the
 * current VM and the new instance, then cross-map each VM's page tables
 * into the new instance.  Returns OK, EPERM if an instance already
 * exists, or the error code of the first failing step.
 */
	int r;
	u32_t flags;
	int verify;
	struct vmproc *this_vm_vmp;

	this_vm_vmp = &vmproc[VM_PROC_NR];
	pt_assert(&this_vm_vmp->vm_pt);

	/* Check if the operation is allowed. */
	assert(num_vm_instances == 1 || num_vm_instances == 2);
	if(num_vm_instances == 2) {
		/* NOTE(review): message lacks a trailing newline — confirm
		 * whether this printf appends one. */
		printf("VM can currently support no more than 2 VM instances at the time.");
		return EPERM;
	}

	/* Copy settings from current VM. */
	new_vm_vmp->vm_flags |= VMF_VM_INSTANCE;
	/* NOTE(review): the counter is bumped before the fallible steps
	 * below and is not rolled back on failure — verify intended. */
	num_vm_instances++;

	/* Pin memory for the new VM instance. */
	r = map_pin_memory(new_vm_vmp);
	if(r != OK) {
		return r;
	}

	/* Preallocate page tables for the entire address space for both
	 * VM and the new VM instance.
	 */
	flags = 0;
	verify = FALSE;
	r = pt_ptalloc_in_range(&this_vm_vmp->vm_pt,
		VM_OWN_HEAPBASE, VM_DATATOP, flags, verify);
	if(r != OK) {
		return r;
	}
	r = pt_ptalloc_in_range(&new_vm_vmp->vm_pt,
		VM_OWN_HEAPBASE, VM_DATATOP, flags, verify);
	if(r != OK) {
		return r;
	}

	/* Let the new VM instance map VM's page tables and its own. */
	r = pt_ptmap(this_vm_vmp, new_vm_vmp);
	if(r != OK) {
		return r;
	}
	r = pt_ptmap(new_vm_vmp, new_vm_vmp);
	if(r != OK) {
		return r;
	}

	/* Consistency check on both page tables before returning. */
	pt_assert(&this_vm_vmp->vm_pt);
	pt_assert(&new_vm_vmp->vm_pt);
	return OK;
}
/*
 * Tear down a protothread state.  In debug builds, assert that no
 * thread is still waiting, ready, or running when the state goes away.
 */
void protothread_deinit(state_t const s)
{
    if (PT_DEBUG) {
        int slot;

        for (slot = 0; slot < PT_NWAIT; slot++) {
            pt_assert(s->wait[slot] == NULL);
        }
        pt_assert(s->ready == NULL);
        pt_assert(s->running == NULL);
    }
}
/* should only be called by the macro pt_yield() */
void pt_enqueue_yield(pt_thread_t * const t)
{
    state_t const state = t->s;

    /* only the currently running thread may yield itself */
    pt_assert(state->running == t);
    pt_add_ready(state, t);
}
/*
 * Binary logarithm of val: exact when val is a power of 2, otherwise
 * floor(log2(val)).
 *
 * Uses the widest bit-count builtin (__builtin_clzll on unsigned long
 * long) so the result stays correct even when PT_ID_T is wider than
 * unsigned long (e.g. 64-bit IDs on an LLP64 platform, where the old
 * cast to unsigned long would truncate the value).
 */
static PT_ID_T pt_log2(PT_ID_T val)
{
    /* clz of 0 is undefined behavior, and log2(0) has no value */
    pt_assert(val > 0);
    return sizeof(unsigned long long) * CHAR_BIT
        - __builtin_clzll((unsigned long long) val) - 1;
}
/* should only be called by the macro pt_wait() */
void pt_enqueue_wait(pt_thread_t * const t, void * const channel)
{
    state_t const state = t->s;
    pt_thread_t ** const queue = pt_get_wait_list(state, channel);

    /* only the running thread may put itself to sleep */
    pt_assert(state->running == t);
    t->channel = channel;
    pt_link(queue, t);
}
/*
 * Remove thread t from whichever queue currently holds it (the ready
 * queue, or the wait queue for its channel).  Returns FALSE when the
 * thread was not found on either queue.  The running thread must not
 * kill itself.
 */
bool_t pt_kill(pt_thread_t * const t)
{
    state_t const state = t->s;

    pt_assert(state->running != t);

    if (pt_find_and_unlink(&state->ready, t)) {
        return TRUE;
    }

    /* not ready: try the wait queue hashed from its channel */
    if (pt_find_and_unlink(pt_get_wait_list(state, t->channel), t)) {
        return TRUE;
    }

    return FALSE;
}
/*
 * Dequeue and run the oldest ready thread, if any, until it yields or
 * waits.  Returns TRUE when at least one more thread remains ready to
 * run afterwards, FALSE otherwise.
 */
bool_t protothread_run(state_t const s)
{
    pt_thread_t *next;

    pt_assert(s->running == NULL);

    if (s->ready == NULL) {
        /* nothing to do */
        return FALSE;
    }

    /* unlink the oldest ready thread and run it */
    next = pt_unlink_oldest(&s->ready);
    s->running = next;
    next->func(next->env);
    s->running = NULL;

    /* more work pending? */
    return s->ready != NULL;
}