/*
 * pageaddphyszone() - queue the not-yet-mapped physical pages of a zone
 * onto the free-page list *zone.
 *
 * base appears to be the zone's start address and nb its end address,
 * since the page count n is derived from nb - adr — TODO confirm against
 * callers. Returns the zone size in bytes.
 */
unsigned long pageaddphyszone(uintptr_t base, struct physpage **zone, unsigned long nb)
{
    /* align the start address up to a page boundary */
    uintptr_t adr = rounduppow2(base, PAGESIZE);
    struct physpage *page = &vmphystab[pagenum(adr)];
    /* boot page-table entry for the first page of the zone */
    uint32_t *pte = (uint32_t *)&_pagetab + vmpagenum(adr);
    /* number of whole pages between adr and nb */
    unsigned long n = rounduppow2(nb - adr, PAGESIZE) >> PAGESIZELOG2;
    unsigned long size = n * PAGESIZE;

    /* walk backwards from the high end of the zone */
    adr += n << PAGESIZELOG2;
    page += n;
    /* NOTE(review): overwrites rather than accumulates the global page
       count — confirm only one physical zone is ever accounted here */
    vmpagestat.nphys = n;
    kprintf("reserving %ld (%lx) maps @ %p (%lx)\n",
            n, n, vmphystab, pagenum(base));
    while (n--) {
        /* NOTE(review): pte advances forward from the low end while
           page/adr march backward from the high end, so the PTE tested
           does not correspond to the page being queued — verify intent */
        if (!*pte) {
            page--;
            /* NOTE(review): adr is stored before the decrement, so each
               entry records the address one page above its frame —
               possible off-by-one (same pattern in pageinitphyszone()) */
            page->adr = adr;
            page->nflt = 0;
            queuepush(page, zone);
            adr -= PAGESIZE;
        }
        pte++;
    }

    return size;
}
unsigned long pageinitphyszone(uintptr_t base, struct physpage **zone, unsigned long nb) { struct physpage *page = &vmphystab[pagenum(base)]; uintptr_t adr = rounduppow2(base, PAGESIZE); unsigned long n = rounduppow2(nb - adr, PAGESIZE) >> PAGESIZELOG2; unsigned long size = n * PAGESIZE; adr += n << PAGESIZELOG2; page += n; vmpagestat.nphys = n; kprintf("initializing %ld (%lx) pages @ %p (%lu, %lu)\n", n, n, vmphystab, pagenum(base), n, adr, adr + size); while (n--) { page--; page->adr = adr; page->nflt = 0; queuepush(page, zone); adr -= PAGESIZE; } return size; }
/*
 * meminitpool() - carve slab and magazine header tables out of the start
 * of a physical memory range and initialise the allocator pools.
 *
 * base/nb describe the raw memory range; the range is first aligned up to
 * a MEMMIN boundary. Returns the first address past the reserved header
 * tables (stored as the pools' base).
 */
unsigned long meminitpool(struct mempool *physpool, uintptr_t base, size_t nb)
{
    uintptr_t adr = base;
    // unsigned long sz = (nb & (MEMMIN - 1)) ? rounddownpow2(nb, MEMMIN) : nb;
    size_t sz = nb;
    /* misalignment of base relative to the minimum block size */
    intptr_t ofs = base & (MEMMIN - 1);
    size_t nblk;
    size_t hdrsz;

    if (ofs) {
        /* align base up; shrink the usable size accordingly */
        adr += MEMMIN - ofs;
        sz -= adr - base;
    }
    nblk = sz >> MEMMINLOG2;
    /* configure slab headers */
    hdrsz = nblk * sizeof(struct memslab);
    hdrsz = rounduppow2(hdrsz, PAGESIZE);
#if (__KERNEL__)
    kprintf("MEM: reserved %lu bytes for %lu slab headers\n", hdrsz, nblk);
#endif
    /* identity-map the slab header table as present + writable */
    vmmapseg((uint32_t *)&_pagetab, adr, adr, adr + hdrsz,
             PAGEPRES | PAGEWRITE);
    physpool->nblk = nblk;
    physpool->blktab = (void *)adr;
    adr += hdrsz;
    // kbzero((void *)adr, hdrsz);
    /* configure magazine headers */
    hdrsz = nblk * sizeof(struct memmag);
    hdrsz = rounduppow2(hdrsz, PAGESIZE);
#if (__KERNEL__)
    kprintf("MEM: reserved %lu bytes for %lu magazine headers\n",
            hdrsz, nblk);
#endif
    /* NOTE(review): the slab table went through the physpool parameter but
       the magazine table goes to the memvirtpool global — presumably
       physpool aliases memphyspool; confirm against callers */
    memvirtpool.nblk = nblk;
    memvirtpool.blktab = (void *)adr;
    /* identity-map the magazine header table as present + writable */
    vmmapseg((uint32_t *)&_pagetab, adr, adr, adr + hdrsz,
             PAGEPRES | PAGEWRITE);
    // kbzero((void *)adr, hdrsz);
    adr += hdrsz;
    /* both pools hand out memory starting right after the header tables */
    memvirtpool.base = adr;
    memphyspool.base = adr;
#if (__KERNEL__ && (MEMDIAG))
    /* NOTE(review): memdiag() receives the pool struct by value, not by
       address — confirm that is the intended signature */
    memdiag(memvirtpool);
#endif

    return adr;
}
static struct zastoken * zasprocinst(struct zastoken *token, zasmemadr_t adr, zasmemadr_t *retadr) { #if (WPM) struct wpmopcode *op = NULL; #elif (ZEN) struct zpuop *op = NULL; #endif #if (WPMVEC) struct vecopcode *vop = NULL; #endif zasmemadr_t opadr = rounduppow2(adr, 4); struct zastoken *token1 = NULL; struct zastoken *token2 = NULL; struct zastoken *retval = NULL; struct zassymrec *sym; uint8_t narg = token->data.inst.narg; // uint8_t len = token->data.inst.op == OPNOP ? 1 : 4; uint8_t len = 4; while (adr < opadr) { #if (WPM) physmem[adr] = OPNOP; #endif adr++; } // adr = opadr; #if (ZASDB) zasaddline(adr, token->data.inst.data, token->file, token->line); #endif #if (WPMVEC) if (token->unit == UNIT_VEC) { vop = (struct vecopcode *)&zvm.physmem[adr]; vop->inst = token->data.inst.op; vop->unit = UNIT_VEC; vop->flg = token->opflg; token1 = token->next; zasfreetoken(token); if (token1) { switch(token1->type) { case ZASTOKENVAREG: vop->arg1t = ARGVAREG; vop->reg1 = token1->data.reg & 0xff; break; case ZASTOKENVLREG: vop->arg1t = ARGVLREG; vop->reg1 = token1->data.reg & 0xff; break; case ZASTOKENIMMED: vop->arg1t = ARGIMMED; vop->args[0] = token1->val; len += sizeof(zasword_t); break; case ZASTOKENADR: vop->arg1t = ARGIMMED; sym = malloc(sizeof(struct zassymrec)); sym->name = (uint8_t *)strdup((char *)token1->data.sym.name); sym->adr = (uintptr_t)&op->args[0]; zasqueuesym(sym); len += sizeof(uintptr_t); break; default: fprintf(stderr, "invalid argument 1 of type %lx\n", token1->type); printtoken(token1); exit(1); break; } token2 = token1->next; zasfreetoken(token1); retval = token2; } vop->narg = len >> 3; if (narg == 1) { vop->arg2t = ARGNONE; } else if (narg == 2 && (token2)) { switch(token2->type) { case ZASTOKENVAREG: vop->arg2t = ARGVAREG; vop->reg2 = token2->data.reg & 0xff; break; case ZASTOKENVLREG: vop->arg2t = ARGVLREG; vop->reg2 = token2->data.reg & 0xff; break; default: fprintf(stderr, "invalid argument 2 of type %lx\n", token2->type); printtoken(token2); 
exit(1); break; } retval = token2->next; zasfreetoken(token2); } } else
/* initialise dungeon generator */ void cellinitdng(struct celldng *dng, long width, long height) { long num = width * height; long ncavemax = 16; long ncormax = 16; long n = rounduppow2(num, CHAR_BIT); char *map = calloc(n / CHAR_BIT, sizeof(char)); char *cormap = calloc(n / CHAR_BIT, sizeof(char)); struct cellcave **cavetab = calloc(ncavemax, sizeof(struct cellcave *)); long *idtab = malloc(num * sizeof(long)); struct cellcor **cortab = calloc(ncormax, sizeof(struct cellcor *)); long ndx; dngcavetab = calloc(width * height, sizeof(struct cellcave **)); if (!dngcavetab) { fprintf(stderr, "CELL: failed to allocate global cave table\n"); exit(1); } if (!map) { fprintf(stderr, "CELL: failed to allocate cave bitmap\n"); exit(1); } if (!cormap) { fprintf(stderr, "CELL: failed to allocate corridor bitmap\n"); exit(1); } if (!cavetab) { fprintf(stderr, "CELL: failed to allocate cave table\n"); exit(1); } if (!idtab) { fprintf(stderr, "CELL: failed to allocate cave ID table\n"); exit(1); } /* set random seed */ // dngsrand(~0L); dngsrand(0x55555555L); /* set cell owner-IDs (caves) to uninitialised */ for (ndx = 0 ; ndx < num ; ndx++) { idtab[ndx] = DNG_NOCAVE; } dng->map = map; dng->cormap = cormap; dng->caveidtab = idtab; /* initialise dungeon structure */ dng->width = width; dng->height = height; // dng->map = map; dng->ncormax = ncormax; dng->cortab = cortab; dng->ncavemax = ncavemax; dng->cavetab = cavetab; cellsetgenparm(dng, NULL); return; }