/**
 * load a serialized mtree from a named file and convert it into a tree
 *
 * Builds the file path from @p dir_path and @p name, reads the raw
 * serialized bytes, rebuilds the in-memory mtree, and finally converts
 * that mtree into a freshly allocated T tree.
 *
 * @param[in] dir_path directory that holds the serialized data file
 * @param[in] name     file name within that directory
 * @returns newly allocated tree (caller owns it); intermediate buffers
 *          and the temporary mtree are freed before returning
 */
T *__a_unserializet(char *dir_path,char *name) {
    char fn[1000];
    __a_vm_fn(fn,dir_path,name);

    // slurp the serialized bytes and rebuild the mtree from them
    S *serialized = readFile(fn,0);
    H mtree = _m_unserialize(serialized);
    free(serialized);

    // convert the transient mtree into the tree we hand back
    T *result = _t_new_from_m(mtree);
    _m_free(mtree);
    return result;
}
/**
 * build mtree from serialized mtree data
 *
 * Walks the serialized image level by level, allocating a parallel
 * in-memory level/node structure and rehydrating each node's surface
 * according to its flags.
 *
 * @param[in] s pointer to serialized data
 * @returns handle to new mtree (handle's address is reset to {0,0})
 */
H _m_unserialize(S *s) {
    // rebuild the mtree header from the serialized header
    M *m = malloc(sizeof(M));
    m->magic = s->magic;
    m->levels = s->levels;
    m->lP = malloc(sizeof(L)*m->levels);
    H h = {m,{0,0}};

    // blob region: out-of-line surface data lives at blob_offset past the
    // start of the serialized image; allocated surfaces store their blob
    // offset in the serialized node's surface slot
    void *blob = s->blob_offset + (void *)s;
    uint32_t s_size = SERIALIZED_HEADER_SIZE(m->levels);

    // h.a.l / h.a.i double as the loop indices so GET_LEVEL/GET_NODE can
    // resolve the current level and node from the handle itself
    for(h.a.l=0; h.a.l<m->levels; h.a.l++) {
        // locate this level's serialized image via its recorded offset
        L *sl = (L *) (((void *)s) + s_size + ((S *)s)->level_offsets[h.a.l]);
        L *l = GET_LEVEL(h);
        l->nodes = sl->nodes;
        l->nP = malloc(sizeof(N)*l->nodes);

        // serialized nodes start right after the level's node-count field
        N *sn = sizeof(Mindex)+(void *)sl;
        for(h.a.i=0;h.a.i < l->nodes;h.a.i++) {
            N *n = GET_NODE(h,l);
            // struct-copy the node, then fix up the surface pointer below
            *n = *sn;
            // sn->surface holds a byte offset into the blob, not a pointer
            void *surface = blob+*(size_t *)&sn->surface;
            if (n->flags & TFLAG_SURFACE_IS_TREE && !(n->flags & TFLAG_SURFACE_IS_RECEPTOR)) {
                // orthogonal (embedded) tree: the surface is itself a
                // serialized mtree; recursively unserialize it and store a
                // heap-allocated handle as the surface
                if (!(n->flags & TFLAG_ALLOCATED)) {
                    raise_error("whoa! orthogonal tree handles are supposed to be allocated!");
                }
                H sh = _m_unserialize((S *)surface);
                // NOTE(review): malloc'd sizeof(H) but copied sn->size bytes —
                // presumably sn->size == sizeof(H) for these nodes; confirm,
                // otherwise this over/under-copies the handle
                n->surface = malloc(sizeof(H));
                memcpy(n->surface,&sh,sn->size);
            }
            else if (n->flags & TFLAG_ALLOCATED) {
                // allocated surface: copy the blob data into fresh heap memory
                n->surface = malloc(sn->size);
                memcpy(n->surface,surface,sn->size);
            }
            else {
                // small surface stored inline in the surface field itself
                memcpy(&n->surface,&sn->surface,sn->size);
            }
            // advance to the next serialized node (fixed serialized stride)
            sn = (N *) (SERIALIZED_NODE_SIZE + ((void*)sn));
        }
    }
    // reset the handle's address so it refers to the tree root
    h.a.i = h.a.l = 0;
    return h;
}
/**
 * bootstrap the ceptr system
 *
 * starts up the vmhost and wakes up receptors that should be running in it.
 *
 * If the storage directory does not exist, a brand-new vmhost is created;
 * otherwise the semtable, receptors, and vmhost state are unserialized from
 * files in @p dir_path and previously active receptors are re-activated.
 *
 * @param[in] dir_path storage directory for serialized vmhost state; the
 *            pointer is retained in G_vm->dir, so it must outlive the vmhost
 *
 * @TODO check the compository to verify our version of the vmhost
 */
void _a_boot(char *dir_path) {
    // check if the storage directory exists
    struct stat st = {0};
    if (stat(dir_path, &st) == -1) {
        // if no directory we are firing up an initial instance, so
        // create directory
        mkdir(dir_path,0700);
        // instantiate a VMHost object
        G_vm = _v_new();
        // create the basic receptors that all VMHosts have
        _v_instantiate_builtins(G_vm);
    }
    else {
        char fn[1000];
        void *buffer;
        // unserialize the semtable base tree
        SemTable *sem = _sem_new();
        T *t = __a_unserializet(dir_path,SEM_FN);
        sem->stores[0].definitions = t;
        // restore definitions to the correct store slots
        T *paths = __a_unserializet(dir_path,PATHS_FN);
        int i = 0;
        int c = _t_children(paths);
        for(i=1;i<=c;i++) {
            T *p = _t_child(paths,i);
            if (semeq(RECEPTOR_PATH,_t_symbol(p))) {
                // NOTE(review): at i=1 this overwrites the stores[0].definitions
                // assignment above with a subtree of t — presumably intentional
                // (slot 0's path points back into the base tree); confirm
                T *x = _t_get(t,(int *)_t_surface(p));
                sem->stores[i-1].definitions = x;
            }
        }
        _t_free(paths);
        sem->contexts = c+1;
        // unserialize all of the vmhost's instantiated receptors and other instances
        // NOTE(review): __a_vmfn (2 args) vs __a_vm_fn (3 args) used elsewhere —
        // verify both helpers exist; this is not a typo'd call to the same one
        __a_vmfn(fn,dir_path);
        buffer = readFile(fn,0);
        Receptor *r = _r_unserialize(sem,buffer);
        G_vm = __v_init(r,sem);
        free(buffer);
        // unserialize other vmhost state data
        S *s;
        __a_vm_state_fn(fn,dir_path);
        s = readFile(fn,0);
        H h = _m_unserialize(s);
        free(s);
        H hars;
        hars.m=h.m;
        hars.a = _m_child(h,1); // first child is ACTIVE_RECEPTORS
        H har;
        har.m=h.m;
        // re-activate every receptor recorded as active in the saved state
        int j = _m_children(hars);
        for (i=1;i<=j;i++) {
            har.a = _m_child(hars,i);
            if(!semeq(_m_symbol(har),RECEPTOR_XADDR)) raise_error("expecting RECEPTOR_XADDR!");
            _v_activate(G_vm,*(Xaddr *)_m_surface(har));
        }
        _m_free(h);
    }
    G_vm->dir = dir_path;
    // _a_check_vm_host_version_on_the_compository();
    _v_start_vmhost(G_vm);
}